author     Matt Clay <mclay@redhat.com>    2020-02-28 20:37:50 -0800
committer  GitHub <noreply@github.com>     2020-02-28 20:37:50 -0800
commit     7c8b046b5fac7342fcf7882d4c84efc089e7866f (patch)
tree       22c3b9ac4cb671951e7f20457269f63c1caf91f8
parent     04666c9fa1eb2f367cf3b1c9c6853828bdcb813f (diff)
download   ansible-7c8b046b5fac7342fcf7882d4c84efc089e7866f.tar.gz
Fourth batch of incidental integration tests. (#67873)
* Copy in incidental posix tests.
* Update incidental test aliases.
* Update target names.
* Add support plugins.
* Fix paths.
* Update ignores.
* Update integration-aliases sanity test.
* Add incidental tests to CI.
-rw-r--r-- shippable.yml | 16
-rw-r--r-- test/integration/targets/incidental_assemble/aliases | 1
-rw-r--r-- test/integration/targets/incidental_assemble/files/fragment1 | 1
-rw-r--r-- test/integration/targets/incidental_assemble/files/fragment2 | 1
-rw-r--r-- test/integration/targets/incidental_assemble/files/fragment3 | 1
-rw-r--r-- test/integration/targets/incidental_assemble/files/fragment4 | 1
-rw-r--r-- test/integration/targets/incidental_assemble/files/fragment5 | 1
-rw-r--r-- test/integration/targets/incidental_assemble/tasks/main.yml | 101
-rw-r--r-- test/integration/targets/incidental_cloud_init_data_facts/aliases | 5
-rw-r--r-- test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml | 50
-rw-r--r-- test/integration/targets/incidental_connection_chroot/aliases | 3
-rwxr-xr-x test/integration/targets/incidental_connection_chroot/runme.sh | 18
-rw-r--r-- test/integration/targets/incidental_connection_chroot/test_connection.inventory | 7
-rw-r--r-- test/integration/targets/incidental_consul/aliases | 3
-rw-r--r-- test/integration/targets/incidental_consul/meta/main.yml | 3
-rw-r--r-- test/integration/targets/incidental_consul/tasks/consul_session.yml | 162
-rw-r--r-- test/integration/targets/incidental_consul/tasks/main.yml | 97
-rw-r--r-- test/integration/targets/incidental_consul/templates/consul_config.hcl.j2 | 13
-rw-r--r-- test/integration/targets/incidental_deploy_helper/aliases | 1
-rw-r--r-- test/integration/targets/incidental_deploy_helper/tasks/main.yml | 149
-rw-r--r-- test/integration/targets/incidental_flatpak_remote/aliases | 7
-rw-r--r-- test/integration/targets/incidental_flatpak_remote/meta/main.yml | 2
-rw-r--r-- test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml | 101
-rw-r--r-- test/integration/targets/incidental_flatpak_remote/tasks/main.yml | 57
-rw-r--r-- test/integration/targets/incidental_flatpak_remote/tasks/setup.yml | 27
-rw-r--r-- test/integration/targets/incidental_flatpak_remote/tasks/test.yml | 72
-rw-r--r-- test/integration/targets/incidental_inventory_docker_swarm/aliases | 11
-rw-r--r-- test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml | 3
-rw-r--r-- test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml | 5
-rw-r--r-- test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml | 3
-rw-r--r-- test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml | 18
-rw-r--r-- test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml | 15
-rw-r--r-- test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml | 58
-rw-r--r-- test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml | 35
-rwxr-xr-x test/integration/targets/incidental_inventory_docker_swarm/runme.sh | 23
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/aliases | 6
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml | 4
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml | 21
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml | 45
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml | 155
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml | 35
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml | 3
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml | 58
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j2 | 10
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/playbooks/install_dependencies.yml | 19
-rw-r--r-- test/integration/targets/incidental_lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml | 9
-rwxr-xr-x test/integration/targets/incidental_lookup_hashi_vault/runme.sh | 23
-rw-r--r-- test/integration/targets/incidental_lookup_rabbitmq/aliases | 6
-rw-r--r-- test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml | 2
-rw-r--r-- test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml | 5
-rw-r--r-- test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml | 138
-rw-r--r-- test/integration/targets/incidental_lvg/aliases | 6
-rw-r--r-- test/integration/targets/incidental_lvg/meta/main.yml | 2
-rw-r--r-- test/integration/targets/incidental_lvg/tasks/main.yml | 15
-rw-r--r-- test/integration/targets/incidental_lvg/tasks/setup.yml | 13
-rw-r--r-- test/integration/targets/incidental_lvg/tasks/teardown.yml | 17
-rw-r--r-- test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml | 33
-rw-r--r-- test/integration/targets/incidental_lvg/tasks/test_indempotency.yml | 15
-rw-r--r-- test/integration/targets/incidental_mongodb_parameter/aliases | 7
-rw-r--r-- test/integration/targets/incidental_mongodb_parameter/defaults/main.yml | 21
-rw-r--r-- test/integration/targets/incidental_mongodb_parameter/meta/main.yml | 3
-rw-r--r-- test/integration/targets/incidental_mongodb_parameter/tasks/main.yml | 143
-rw-r--r-- test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml | 55
-rw-r--r-- test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml | 25
-rw-r--r-- test/integration/targets/incidental_postgresql_user/aliases | 4
-rw-r--r-- test/integration/targets/incidental_postgresql_user/defaults/main.yml | 3
-rw-r--r-- test/integration/targets/incidental_postgresql_user/meta/main.yml | 2
-rw-r--r-- test/integration/targets/incidental_postgresql_user/tasks/main.yml | 7
-rw-r--r-- test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml | 741
-rw-r--r-- test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml | 153
-rw-r--r-- test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml | 167
-rw-r--r-- test/integration/targets/incidental_postgresql_user/tasks/test_password.yml | 336
-rw-r--r-- test/integration/targets/incidental_selinux/aliases | 3
-rw-r--r-- test/integration/targets/incidental_selinux/tasks/main.yml | 36
-rw-r--r-- test/integration/targets/incidental_selinux/tasks/selinux.yml | 364
-rw-r--r-- test/integration/targets/incidental_selinux/tasks/selogin.yml | 81
-rw-r--r-- test/integration/targets/incidental_setup_docker/aliases | 2
-rw-r--r-- test/integration/targets/incidental_setup_docker/defaults/main.yml | 16
-rw-r--r-- test/integration/targets/incidental_setup_docker/handlers/main.yml | 14
-rw-r--r-- test/integration/targets/incidental_setup_docker/meta/main.yml | 2
-rw-r--r-- test/integration/targets/incidental_setup_docker/tasks/Debian.yml | 43
-rw-r--r-- test/integration/targets/incidental_setup_docker/tasks/Fedora.yml | 21
-rw-r--r-- test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml | 39
-rw-r--r-- test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml | 29
-rw-r--r-- test/integration/targets/incidental_setup_docker/tasks/Suse.yml | 7
-rw-r--r-- test/integration/targets/incidental_setup_docker/tasks/main.yml | 113
-rw-r--r-- test/integration/targets/incidental_setup_docker/vars/Debian.yml | 5
-rw-r--r-- test/integration/targets/incidental_setup_docker/vars/Fedora.yml | 4
-rw-r--r-- test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml | 8
-rw-r--r-- test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml | 9
-rw-r--r-- test/integration/targets/incidental_setup_docker/vars/Suse.yml | 2
-rw-r--r-- test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml | 5
-rw-r--r-- test/integration/targets/incidental_setup_docker/vars/default.yml | 0
-rw-r--r-- test/integration/targets/incidental_setup_flatpak_remote/README.md | 138
-rw-r--r-- test/integration/targets/incidental_setup_flatpak_remote/aliases | 1
-rw-r--r-- test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz | bin 0 -> 15496 bytes
-rw-r--r-- test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml | 4
-rw-r--r-- test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml | 2
-rw-r--r-- test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml | 22
-rw-r--r-- test/integration/targets/incidental_setup_mongodb/aliases | 1
-rw-r--r-- test/integration/targets/incidental_setup_mongodb/defaults/main.yml | 46
-rw-r--r-- test/integration/targets/incidental_setup_mongodb/handlers/main.yml | 24
-rw-r--r-- test/integration/targets/incidental_setup_mongodb/tasks/main.yml | 166
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/aliases | 1
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/defaults/main.yml | 18
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/handlers/main.yml | 25
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/tasks/main.yml | 105
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/Debian.yml | 16
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/Fedora-py3.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/Fedora.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/FreeBSD.yml | 5
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/RedHat-7.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/RedHat-8.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/RedHat.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/Suse-py3.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/Suse.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/Ubuntu-py3.yml | 16
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/default-py3.yml | 0
-rw-r--r-- test/integration/targets/incidental_setup_mysql_db/vars/default.yml | 0
-rw-r--r-- test/integration/targets/incidental_setup_openssl/aliases | 2
-rw-r--r-- test/integration/targets/incidental_setup_openssl/tasks/main.yml | 40
-rw-r--r-- test/integration/targets/incidental_setup_openssl/vars/Debian.yml | 3
-rw-r--r-- test/integration/targets/incidental_setup_openssl/vars/FreeBSD.yml | 3
-rw-r--r-- test/integration/targets/incidental_setup_openssl/vars/RedHat.yml | 3
-rw-r--r-- test/integration/targets/incidental_setup_openssl/vars/Suse.yml | 3
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/aliases | 1
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml | 17
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql | 2
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql | 2
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql | 2
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/files/dummy.control | 3
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf | 10
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml | 215
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml | 81
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml | 8
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml | 12
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml | 12
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml | 12
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml | 12
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml | 12
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml | 12
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml | 8
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml | 7
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml | 8
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml | 8
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml | 8
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml | 8
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml | 8
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_postgresql_db/vars/default.yml | 6
-rw-r--r-- test/integration/targets/incidental_setup_rabbitmq/aliases | 1
-rw-r--r-- test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf | 8
-rw-r--r-- test/integration/targets/incidental_setup_rabbitmq/meta/main.yml | 3
-rw-r--r-- test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml | 3
-rw-r--r-- test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml | 63
-rw-r--r-- test/integration/targets/incidental_setup_tls/aliases | 1
-rw-r--r-- test/integration/targets/incidental_setup_tls/files/ca_certificate.pem | 19
-rw-r--r-- test/integration/targets/incidental_setup_tls/files/ca_key.pem | 28
-rw-r--r-- test/integration/targets/incidental_setup_tls/files/client_certificate.pem | 20
-rw-r--r-- test/integration/targets/incidental_setup_tls/files/client_key.pem | 27
-rw-r--r-- test/integration/targets/incidental_setup_tls/files/server_certificate.pem | 20
-rw-r--r-- test/integration/targets/incidental_setup_tls/files/server_key.pem | 27
-rw-r--r-- test/integration/targets/incidental_setup_tls/tasks/main.yml | 21
-rw-r--r-- test/integration/targets/incidental_setup_zabbix/aliases | 6
-rw-r--r-- test/integration/targets/incidental_setup_zabbix/defaults/main.yml | 13
-rw-r--r-- test/integration/targets/incidental_setup_zabbix/handlers/main.yml | 15
-rw-r--r-- test/integration/targets/incidental_setup_zabbix/meta/main.yml | 2
-rw-r--r-- test/integration/targets/incidental_setup_zabbix/tasks/main.yml | 3
-rw-r--r-- test/integration/targets/incidental_setup_zabbix/tasks/setup.yml | 89
-rw-r--r-- test/integration/targets/incidental_setup_zabbix/templates/zabbix.conf.php.j2 | 20
-rw-r--r-- test/integration/targets/incidental_setup_zabbix/templates/zabbix_server.conf.j2 | 7
-rw-r--r-- test/integration/targets/incidental_synchronize/aliases | 1
-rw-r--r-- test/integration/targets/incidental_synchronize/files/bar.txt | 1
-rw-r--r-- test/integration/targets/incidental_synchronize/files/foo.txt | 1
-rw-r--r-- test/integration/targets/incidental_synchronize/tasks/main.yml | 273
-rw-r--r-- test/integration/targets/incidental_timezone/aliases | 4
-rw-r--r-- test/integration/targets/incidental_timezone/tasks/main.yml | 68
-rw-r--r-- test/integration/targets/incidental_timezone/tasks/test.yml | 607
-rw-r--r-- test/integration/targets/incidental_ufw/aliases | 11
-rw-r--r-- test/integration/targets/incidental_ufw/tasks/main.yml | 34
-rw-r--r-- test/integration/targets/incidental_ufw/tasks/run-test.yml | 21
-rw-r--r-- test/integration/targets/incidental_ufw/tasks/tests/basic.yml | 402
-rw-r--r-- test/integration/targets/incidental_ufw/tasks/tests/global-state.yml | 150
-rw-r--r-- test/integration/targets/incidental_ufw/tasks/tests/insert_relative_to.yml | 80
-rw-r--r-- test/integration/targets/incidental_ufw/tasks/tests/interface.yml | 81
-rw-r--r-- test/integration/targets/incidental_x509_crl/aliases | 4
-rw-r--r-- test/integration/targets/incidental_x509_crl/meta/main.yml | 2
-rw-r--r-- test/integration/targets/incidental_x509_crl/tasks/impl.yml | 289
-rw-r--r-- test/integration/targets/incidental_x509_crl/tasks/main.yml | 83
-rw-r--r-- test/integration/targets/incidental_x509_crl/tests/validate.yml | 61
-rw-r--r-- test/integration/targets/incidental_xml/aliases | 3
-rw-r--r-- test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml | 13
-rw-r--r-- test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-children-elements.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml | 17
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml | 17
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml | 32
-rw-r--r-- test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-pretty-print-only.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-pretty-print.xml | 15
-rw-r--r-- test/integration/targets/incidental_xml/results/test-remove-attribute.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-remove-element.xml | 13
-rw-r--r-- test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml | 13
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-attribute-value.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml | 11
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml | 11
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-children-elements.xml | 11
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-element-value.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml | 14
-rw-r--r-- test/integration/targets/incidental_xml/tasks/main.yml | 67
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml | 29
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml | 29
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml | 28
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml | 32
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml | 32
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml | 31
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml | 35
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml | 237
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml | 32
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml | 30
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-count-unicode.yml | 19
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-count.yml | 19
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml | 32
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-get-element-content.yml | 52
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml | 22
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml | 29
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-pretty-print.yml | 30
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml | 28
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-remove-element.yml | 28
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml | 33
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml | 33
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml | 29
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml | 29
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml | 74
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml | 46
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml | 53
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml | 28
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml | 43
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-element-value.yml | 43
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml | 34
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml | 57
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml | 46
-rw-r--r-- test/integration/targets/incidental_xml/tasks/test-xmlstring.yml | 81
-rw-r--r-- test/integration/targets/incidental_xml/vars/main.yml | 6
-rw-r--r-- test/integration/targets/incidental_zabbix_host/aliases | 6
-rw-r--r-- test/integration/targets/incidental_zabbix_host/defaults/main.yml | 5
-rw-r--r-- test/integration/targets/incidental_zabbix_host/meta/main.yml | 2
-rw-r--r-- test/integration/targets/incidental_zabbix_host/tasks/main.yml | 16
-rw-r--r-- test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_doc.yml | 83
-rw-r--r-- test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_setup.yml | 20
-rw-r--r-- test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_teardown.yml | 10
-rw-r--r-- test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_tests.yml | 1169
-rw-r--r-- test/lib/ansible_test/_internal/sanity/integration_aliases.py | 1
-rw-r--r-- test/sanity/ignore.txt | 9
-rw-r--r-- test/support/integration/plugins/action/assemble.py | 165
-rw-r--r-- test/support/integration/plugins/connection/chroot.py | 208
-rw-r--r-- test/support/integration/plugins/lookup/hashi_vault.py | 302
-rw-r--r-- test/support/integration/plugins/lookup/rabbitmq.py | 190
-rw-r--r-- test/support/integration/plugins/module_utils/crypto.py | 2125
-rw-r--r-- test/support/integration/plugins/module_utils/database.py | 142
-rw-r--r-- test/support/integration/plugins/module_utils/docker/common.py | 1022
-rw-r--r-- test/support/integration/plugins/module_utils/docker/swarm.py | 280
-rw-r--r-- test/support/integration/plugins/module_utils/mysql.py | 106
-rw-r--r-- test/support/integration/plugins/module_utils/postgres.py | 330
-rw-r--r-- test/support/integration/plugins/module_utils/rabbitmq.py | 220
-rw-r--r-- test/support/integration/plugins/modules/assemble.py | 261
-rw-r--r-- test/support/integration/plugins/modules/cloud_init_data_facts.py | 134
-rw-r--r-- test/support/integration/plugins/modules/consul_session.py | 284
-rw-r--r-- test/support/integration/plugins/modules/deploy_helper.py | 521
-rw-r--r-- test/support/integration/plugins/modules/docker_swarm.py | 681
-rw-r--r-- test/support/integration/plugins/modules/flatpak_remote.py | 243
-rw-r--r-- test/support/integration/plugins/modules/locale_gen.py | 237
-rw-r--r-- test/support/integration/plugins/modules/lvg.py | 295
-rw-r--r-- test/support/integration/plugins/modules/mongodb_parameter.py | 223
-rw-r--r-- test/support/integration/plugins/modules/mongodb_user.py | 474
-rw-r--r-- test/support/integration/plugins/modules/mysql_db.py | 617
-rw-r--r-- test/support/integration/plugins/modules/mysql_user.py | 815
-rw-r--r-- test/support/integration/plugins/modules/openssl_certificate.py | 2756
-rw-r--r-- test/support/integration/plugins/modules/openssl_certificate_info.py | 863
-rw-r--r-- test/support/integration/plugins/modules/openssl_csr.py | 1159
-rw-r--r-- test/support/integration/plugins/modules/openssl_privatekey.py | 943
-rw-r--r-- test/support/integration/plugins/modules/pids.py | 89
-rw-r--r-- test/support/integration/plugins/modules/pkgng.py | 406
-rw-r--r-- test/support/integration/plugins/modules/postgresql_db.py | 657
-rw-r--r-- test/support/integration/plugins/modules/postgresql_privs.py | 1097
-rw-r--r-- test/support/integration/plugins/modules/postgresql_query.py | 364
-rw-r--r-- test/support/integration/plugins/modules/postgresql_set.py | 434
-rw-r--r-- test/support/integration/plugins/modules/postgresql_table.py | 601
-rw-r--r-- test/support/integration/plugins/modules/postgresql_user.py | 927
-rw-r--r-- test/support/integration/plugins/modules/rabbitmq_plugin.py | 180
-rw-r--r-- test/support/integration/plugins/modules/rabbitmq_queue.py | 257
-rw-r--r-- test/support/integration/plugins/modules/selinux.py | 266
-rw-r--r-- test/support/integration/plugins/modules/selogin.py | 260
-rw-r--r-- test/support/integration/plugins/modules/synchronize.py | 618
-rw-r--r-- test/support/integration/plugins/modules/timezone.py | 909
-rw-r--r-- test/support/integration/plugins/modules/ufw.py | 598
-rw-r--r-- test/support/integration/plugins/modules/x509_crl.py | 783
-rw-r--r-- test/support/integration/plugins/modules/x509_crl_info.py | 281
-rw-r--r-- test/support/integration/plugins/modules/xml.py | 965
-rw-r--r-- test/support/integration/plugins/modules/zabbix_host.py | 1075
-rw-r--r-- test/support/integration/plugins/modules/zabbix_proxy.py | 472
-rw-r--r-- test/support/integration/plugins/modules/zypper.py | 540
l--------- test/utils/shippable/incidental/aix.sh | 1
l--------- test/utils/shippable/incidental/freebsd.sh | 1
-rwxr-xr-x test/utils/shippable/incidental/linux.sh | 15
l--------- test/utils/shippable/incidental/osx.sh | 1
-rwxr-xr-x test/utils/shippable/incidental/remote.sh | 19
l--------- test/utils/shippable/incidental/rhel.sh | 1
318 files changed, 38534 insertions, 0 deletions
diff --git a/shippable.yml b/shippable.yml
index 8a43d047bf..966ea1cdf3 100644
--- a/shippable.yml
+++ b/shippable.yml
@@ -158,6 +158,22 @@ matrix:
- env: T=linux/ubuntu1604/5
- env: T=linux/ubuntu1804/5
+ - env: T=i/aix/7.2
+ - env: T=i/osx/10.11
+ - env: T=i/rhel/7.6
+ - env: T=i/rhel/8.1
+ - env: T=i/freebsd/11.1
+ - env: T=i/freebsd/12.1
+ - env: T=i/linux/centos6
+ - env: T=i/linux/centos7
+ - env: T=i/linux/centos8
+ - env: T=i/linux/fedora30
+ - env: T=i/linux/fedora31
+ - env: T=i/linux/opensuse15py2
+ - env: T=i/linux/opensuse15
+ - env: T=i/linux/ubuntu1604
+ - env: T=i/linux/ubuntu1804
+
- env: T=aws/2.7/1
- env: T=aws/3.6/1
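Each new `T=i/...` entry above schedules one incidental CI job. A hedged sketch of how these entries reach the runner scripts added at the end of this commit (the mapping of the `i/` prefix onto the new `incidental` directory is inferred from the script paths in the diffstat; the exact dispatch lives in the shared Shippable wrapper, not in this diff):

    # illustrative only: container platforms go through linux.sh,
    # remote platforms (aix, osx, rhel, freebsd) through remote.sh
    test/utils/shippable/incidental/linux.sh centos7    # from T=i/linux/centos7
    test/utils/shippable/incidental/remote.sh rhel/8.1  # from T=i/rhel/8.1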
diff --git a/test/integration/targets/incidental_assemble/aliases b/test/integration/targets/incidental_assemble/aliases
new file mode 100644
index 0000000000..31c6a8b454
--- /dev/null
+++ b/test/integration/targets/incidental_assemble/aliases
@@ -0,0 +1 @@
+shippable/posix/incidental
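The `shippable/posix/incidental` alias is what routes this target into the new CI group. A sketch of exercising it locally with ansible-test, assuming a Docker-capable host and a checkout of this tree (a trailing slash on an alias path selects every target carrying that alias):

    # run the whole incidental POSIX group in a CentOS 7 container
    ansible-test integration shippable/posix/incidental/ --docker centos7 -v
    # or just this one target
    ansible-test integration incidental_assemble --docker centos7 -v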
diff --git a/test/integration/targets/incidental_assemble/files/fragment1 b/test/integration/targets/incidental_assemble/files/fragment1
new file mode 100644
index 0000000000..a00d3ea04a
--- /dev/null
+++ b/test/integration/targets/incidental_assemble/files/fragment1
@@ -0,0 +1 @@
+this is fragment 1
diff --git a/test/integration/targets/incidental_assemble/files/fragment2 b/test/integration/targets/incidental_assemble/files/fragment2
new file mode 100644
index 0000000000..860f760388
--- /dev/null
+++ b/test/integration/targets/incidental_assemble/files/fragment2
@@ -0,0 +1 @@
+this is fragment 2
diff --git a/test/integration/targets/incidental_assemble/files/fragment3 b/test/integration/targets/incidental_assemble/files/fragment3
new file mode 100644
index 0000000000..df95b24bb6
--- /dev/null
+++ b/test/integration/targets/incidental_assemble/files/fragment3
@@ -0,0 +1 @@
+this is fragment 3
diff --git a/test/integration/targets/incidental_assemble/files/fragment4 b/test/integration/targets/incidental_assemble/files/fragment4
new file mode 100644
index 0000000000..c83252bb8e
--- /dev/null
+++ b/test/integration/targets/incidental_assemble/files/fragment4
@@ -0,0 +1 @@
+this is fragment 4
diff --git a/test/integration/targets/incidental_assemble/files/fragment5 b/test/integration/targets/incidental_assemble/files/fragment5
new file mode 100644
index 0000000000..8a527d15f7
--- /dev/null
+++ b/test/integration/targets/incidental_assemble/files/fragment5
@@ -0,0 +1 @@
+this is fragment 5
diff --git a/test/integration/targets/incidental_assemble/tasks/main.yml b/test/integration/targets/incidental_assemble/tasks/main.yml
new file mode 100644
index 0000000000..72f0c111d7
--- /dev/null
+++ b/test/integration/targets/incidental_assemble/tasks/main.yml
@@ -0,0 +1,101 @@
+# test code for the assemble module
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: create a new directory for file source
+ file: dest="{{output_dir}}/src" state=directory
+ register: result
+
+- name: assert the directory was created
+ assert:
+ that:
+ - "result.state == 'directory'"
+
+- name: copy the files to a new directory
+ copy: src="./" dest="{{output_dir}}/src"
+ register: result
+
+- name: create unicode file for test
+ shell: echo "π" > {{ output_dir }}/src/ßΩ.txt
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+
+- name: test assemble with all fragments
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled1"
+ register: result
+
+- name: assert the fragments were assembled
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.changed == True"
+ - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'"
+
+- name: test assemble with all fragments
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled1"
+ register: result
+
+- name: assert that the same assemble made no changes
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.changed == False"
+ - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'"
+
+- name: test assemble with fragments matching a regex
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled2" regexp="^fragment[1-3]$"
+ register: result
+
+- name: assert the fragments were assembled with a regex
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == 'edfe2d7487ef8f5ebc0f1c4dc57ba7b70a7b8e2b'"
+
+- name: test assemble with a delimiter
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled3" delimiter="#--- delimiter ---#"
+ register: result
+
+- name: assert the fragments were assembled with a delimiter
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == 'd986cefb82e34e4cf14d33a3cda132ff45aa2980'"
+
+- name: test assemble with remote_src=False
+ assemble: src="./" dest="{{output_dir}}/assembled4" remote_src=no
+ register: result
+
+- name: assert the fragments were assembled without remote
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'"
+
+- name: test assemble with remote_src=False and a delimiter
+ assemble: src="./" dest="{{output_dir}}/assembled5" remote_src=no delimiter="#--- delimiter ---#"
+ register: result
+
+- name: assert the fragments were assembled without remote
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'"
diff --git a/test/integration/targets/incidental_cloud_init_data_facts/aliases b/test/integration/targets/incidental_cloud_init_data_facts/aliases
new file mode 100644
index 0000000000..e99ab32a62
--- /dev/null
+++ b/test/integration/targets/incidental_cloud_init_data_facts/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/freebsd
diff --git a/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml b/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml
new file mode 100644
index 0000000000..eca905c6c2
--- /dev/null
+++ b/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml
@@ -0,0 +1,50 @@
+---
+- name: test cloud-init
+ # TODO: check for a workaround
+ # install 'cloud-init' failed: dpkg-divert: error: `diversion of /etc/init/ureadahead.conf
+ # to /etc/init/ureadahead.conf.disabled by cloud-init' clashes with `local diversion of
+ # /etc/init/ureadahead.conf to /etc/init/ureadahead.conf.distrib
+ # https://bugs.launchpad.net/ubuntu/+source/ureadahead/+bug/997838
+ # Will also have to skip on OpenSUSE when running on Python 2 on newer Leap versions
+ # (!= 42 and >= 15) as cloud-init will install the Python 3 package, breaking our build on py2.
+ when:
+ - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14)
+ - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3)
+ block:
+ - name: setup install cloud-init
+ package:
+ name:
+ - cloud-init
+ - udev
+
+ - name: setup run cloud-init
+ service:
+ name: cloud-init-local
+ state: restarted
+
+ - name: test gather cloud-init facts in check mode
+ cloud_init_data_facts:
+ check_mode: yes
+ register: result
+ - name: verify test gather cloud-init facts in check mode
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
+
+ - name: test gather cloud-init facts
+ cloud_init_data_facts:
+ register: result
+ - name: verify test gather cloud-init facts
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
diff --git a/test/integration/targets/incidental_connection_chroot/aliases b/test/integration/targets/incidental_connection_chroot/aliases
new file mode 100644
index 0000000000..01f0bd4e61
--- /dev/null
+++ b/test/integration/targets/incidental_connection_chroot/aliases
@@ -0,0 +1,3 @@
+needs/root
+shippable/posix/incidental
+needs/target/connection
diff --git a/test/integration/targets/incidental_connection_chroot/runme.sh b/test/integration/targets/incidental_connection_chroot/runme.sh
new file mode 100755
index 0000000000..e7eb01d3c7
--- /dev/null
+++ b/test/integration/targets/incidental_connection_chroot/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('incidental_connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../incidental_connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/test/integration/targets/incidental_connection_chroot/test_connection.inventory b/test/integration/targets/incidental_connection_chroot/test_connection.inventory
new file mode 100644
index 0000000000..5f78393f21
--- /dev/null
+++ b/test/integration/targets/incidental_connection_chroot/test_connection.inventory
@@ -0,0 +1,7 @@
+[chroot]
+chroot-pipelining ansible_ssh_pipelining=true
+chroot-no-pipelining ansible_ssh_pipelining=false
+[chroot:vars]
+ansible_host=/
+ansible_connection=chroot
+ansible_python_interpreter="{{ ansible_playbook_python }}"
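With `ansible_host=/`, the chroot connection plugin simply targets the local root filesystem, so this inventory can also be smoke-tested ad hoc outside the test harness; a minimal sketch, assuming it runs as root since the plugin must chroot:

    # ad-hoc ping of both pipelining variants through the chroot connection
    sudo ansible -i test_connection.inventory chroot -m ping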
diff --git a/test/integration/targets/incidental_consul/aliases b/test/integration/targets/incidental_consul/aliases
new file mode 100644
index 0000000000..87066db952
--- /dev/null
+++ b/test/integration/targets/incidental_consul/aliases
@@ -0,0 +1,3 @@
+shippable/posix/incidental
+destructive
+skip/aix
diff --git a/test/integration/targets/incidental_consul/meta/main.yml b/test/integration/targets/incidental_consul/meta/main.yml
new file mode 100644
index 0000000000..1039151126
--- /dev/null
+++ b/test/integration/targets/incidental_consul/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - incidental_setup_openssl
diff --git a/test/integration/targets/incidental_consul/tasks/consul_session.yml b/test/integration/targets/incidental_consul/tasks/consul_session.yml
new file mode 100644
index 0000000000..a5490ec6c2
--- /dev/null
+++ b/test/integration/targets/incidental_consul/tasks/consul_session.yml
@@ -0,0 +1,162 @@
+- name: list sessions
+ consul_session:
+ state: list
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - "'sessions' in result"
+
+- name: create a session
+ consul_session:
+ state: present
+ name: testsession
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result['name'] == 'testsession'
+ - "'session_id' in result"
+
+- set_fact:
+ session_id: "{{ result['session_id'] }}"
+
+- name: list sessions after creation
+ consul_session:
+ state: list
+ register: result
+
+- set_fact:
+ session_count: "{{ result['sessions'] | length }}"
+
+- assert:
+ that:
+ - result is changed
+ # selectattr not available on Jinja 2.2 provided by CentOS 6
+ # hence the two following tasks (set_fact/assert) are used
+ # - (result['sessions'] | selectattr('ID', 'match', '^' ~ session_id ~ '$') | first)['Name'] == 'testsession'
+
+- name: search created session
+ set_fact:
+ test_session_found: True
+ loop: "{{ result['sessions'] }}"
+ when: "item.get('ID') == session_id and item.get('Name') == 'testsession'"
+
+- name: ensure session was created
+ assert:
+ that:
+ - test_session_found|default(False)
+
+- name: fetch info about a session
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: ensure 'id' parameter is required when state=info
+ consul_session:
+ state: info
+ name: test
+ register: result
+ ignore_errors: True
+
+- assert:
+ that:
+ - result is failed
+
+- name: ensure unknown scheme fails
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ scheme: non_existent
+ register: result
+ ignore_errors: True
+
+- assert:
+ that:
+ - result is failed
+
+- when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: ensure SSL certificate is checked
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ register: result
+ ignore_errors: True
+
+ - name: previous task should fail since certificate is not known
+ assert:
+ that:
+ - result is failed
+ - "'certificate verify failed' in result.msg"
+
+ - name: ensure SSL certificate isn't checked when validate_certs is disabled
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ validate_certs: False
+ register: result
+
+ - name: previous task should succeed since certificate isn't checked
+ assert:
+ that:
+ - result is changed
+
+ - name: ensure a secure connection is possible
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ environment:
+ REQUESTS_CA_BUNDLE: '{{ remote_dir }}/cert.pem'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+- name: delete a session
+ consul_session:
+ state: absent
+ id: '{{ session_id }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: list sessions after deletion
+ consul_session:
+ state: list
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ # selectattr and equalto not available on Jinja 2.2 provided by CentOS 6
+ # hence the two following tasks (command/assert) are used
+ # - (result['sessions'] | selectattr('ID', 'equalto', session_id) | list | length) == 0
+
+- name: search deleted session
+ command: echo 'session found'
+ loop: "{{ result['sessions'] }}"
+ when: "item.get('ID') == session_id and item.get('Name') == 'testsession'"
+ register: search_deleted
+
+- name: ensure session was deleted
+ assert:
+ that:
+ - search_deleted is skipped # each iteration is skipped
+ - search_deleted is not changed # and then unchanged
diff --git a/test/integration/targets/incidental_consul/tasks/main.yml b/test/integration/targets/incidental_consul/tasks/main.yml
new file mode 100644
index 0000000000..575c2ed9fb
--- /dev/null
+++ b/test/integration/targets/incidental_consul/tasks/main.yml
@@ -0,0 +1,97 @@
+---
+- name: Install Consul and test
+
+ vars:
+ consul_version: '1.5.0'
+ consul_uri: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/consul/consul_{{ consul_version }}_{{ ansible_system | lower }}_{{ consul_arch }}.zip
+ consul_cmd: '{{ output_dir }}/consul'
+
+ block:
+ - name: register pyOpenSSL version
+ command: "{{ ansible_python_interpreter }} -c 'import OpenSSL; print(OpenSSL.__version__)'"
+ register: pyopenssl_version
+
+ - name: Install requests<2.20 (CentOS/RHEL 6)
+ pip:
+ name: requests<2.20
+ register: result
+ until: result is success
+ when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
+
+ - name: Install python-consul
+ pip:
+ name: python-consul
+ register: result
+ until: result is success
+
+ - when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: Generate privatekey
+ openssl_privatekey:
+ path: '{{ output_dir }}/privatekey.pem'
+
+ - name: Generate CSR
+ openssl_csr:
+ path: '{{ output_dir }}/csr.csr'
+ privatekey_path: '{{ output_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
+
+ - name: Generate selfsigned certificate
+ openssl_certificate:
+ path: '{{ output_dir }}/cert.pem'
+ csr_path: '{{ output_dir }}/csr.csr'
+ privatekey_path: '{{ output_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
+ register: selfsigned_certificate
+
+ - name: 'Install unzip'
+ package:
+ name: unzip
+ register: result
+ until: result is success
+ when: ansible_distribution != "MacOSX" # unzip already installed
+
+ - assert:
+ # Linux: x86_64, FreeBSD: amd64
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+ - set_fact:
+ consul_arch: '386'
+ when: ansible_architecture == 'i386'
+ - set_fact:
+ consul_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
+
+ - name: 'Download consul binary'
+ unarchive:
+ src: '{{ consul_uri }}'
+ dest: '{{ output_dir }}'
+ remote_src: true
+ register: result
+ until: result is success
+
+ - vars:
+ remote_dir: '{{ echo_output_dir.stdout }}'
+ block:
+ - command: 'echo {{ output_dir }}'
+ register: echo_output_dir
+
+ - name: 'Create configuration file'
+ template:
+ src: consul_config.hcl.j2
+ dest: '{{ output_dir }}/consul_config.hcl'
+
+ - name: 'Start Consul (dev mode enabled)'
+ shell: 'nohup {{ consul_cmd }} agent -dev -config-file {{ output_dir }}/consul_config.hcl </dev/null >/dev/null 2>&1 &'
+
+ - name: 'Create some data'
+ command: '{{ consul_cmd }} kv put data/value{{ item }} foo{{ item }}'
+ loop: [1, 2, 3]
+
+ - import_tasks: consul_session.yml
+
+ always:
+ - name: 'Kill consul process'
+ shell: "kill $(cat {{ output_dir }}/consul.pid)"
+ ignore_errors: true
diff --git a/test/integration/targets/incidental_consul/templates/consul_config.hcl.j2 b/test/integration/targets/incidental_consul/templates/consul_config.hcl.j2
new file mode 100644
index 0000000000..9af06f02e9
--- /dev/null
+++ b/test/integration/targets/incidental_consul/templates/consul_config.hcl.j2
@@ -0,0 +1,13 @@
+# {{ ansible_managed }}
+server = true
+pid_file = "{{ remote_dir }}/consul.pid"
+ports {
+ http = 8500
+ {% if pyopenssl_version.stdout is version('0.15', '>=') %}
+ https = 8501
+ {% endif %}
+}
+{% if pyopenssl_version.stdout is version('0.15', '>=') %}
+key_file = "{{ remote_dir }}/privatekey.pem"
+cert_file = "{{ remote_dir }}/cert.pem"
+{% endif %}
diff --git a/test/integration/targets/incidental_deploy_helper/aliases b/test/integration/targets/incidental_deploy_helper/aliases
new file mode 100644
index 0000000000..31c6a8b454
--- /dev/null
+++ b/test/integration/targets/incidental_deploy_helper/aliases
@@ -0,0 +1 @@
+shippable/posix/incidental
diff --git a/test/integration/targets/incidental_deploy_helper/tasks/main.yml b/test/integration/targets/incidental_deploy_helper/tasks/main.yml
new file mode 100644
index 0000000000..962c894a81
--- /dev/null
+++ b/test/integration/targets/incidental_deploy_helper/tasks/main.yml
@@ -0,0 +1,149 @@
+---
+- name: record the output directory
+ set_fact: deploy_helper_test_root={{output_dir}}/deploy_helper_test_root
+
+- name: State=query with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=query
+- name: Assert State=query with default parameters
+ assert:
+ that:
+ - "'project_path' in deploy_helper"
+ - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/current'"
+ - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/releases'"
+ - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/shared'"
+ - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'"
+ - "'previous_release' in deploy_helper"
+ - "'previous_release_path' in deploy_helper"
+ - "'new_release' in deploy_helper"
+ - "'new_release_path' in deploy_helper"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release }}'"
+
+- name: State=query with relative overridden paths
+ deploy_helper: path={{ deploy_helper_test_root }} current_path=CURRENT_PATH releases_path=RELEASES_PATH shared_path=SHARED_PATH state=query
+- name: Assert State=query with relative overridden paths
+ assert:
+ that:
+ - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/SHARED_PATH'"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'"
+
+- name: State=query with absolute overridden paths
+ deploy_helper: path={{ deploy_helper_test_root }} current_path=/CURRENT_PATH releases_path=/RELEASES_PATH shared_path=/SHARED_PATH state=query
+- name: Assert State=query with absolute overridden paths
+ assert:
+ that:
+ - "deploy_helper.current_path == '/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '/SHARED_PATH'"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'"
+
+- name: State=query with overridden unfinished_filename
+ deploy_helper: path={{ deploy_helper_test_root }} unfinished_filename=UNFINISHED_DEPLOY state=query
+- name: Assert State=query with overridden unfinished_filename
+ assert:
+ that:
+ - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename"
+
+# Remove the root folder just in case it exists
+- file: path={{ deploy_helper_test_root }} state=absent
+
+- name: State=present with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=present
+- stat: path={{ deploy_helper.releases_path }}
+ register: releases_path
+- stat: path={{ deploy_helper.shared_path }}
+ register: shared_path
+- name: Assert State=present with default parameters
+ assert:
+ that:
+ - "releases_path.stat.exists"
+ - "shared_path.stat.exists"
+
+# Setup older releases for tests
+- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
+ with_items: ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh']
+# Setup the new release
+- file: path={{ deploy_helper.new_release_path }} state=directory
+# Add a buildfile, just like in a real deploy
+- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}
+# Add a buildfile, to an older deploy
+- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }}
+
+- name: State=finalize with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=finalize
+- stat: path={{ deploy_helper.current_path }}
+ register: current_path
+- stat: path={{ deploy_helper.current_path }}/DEPLOY_UNFINISHED
+ register: current_path_unfinished_filename
+- name: Assert State=finalize with default parameters
+ assert:
+ that:
+ - "current_path.stat.islnk"
+ - "deploy_helper.new_release_path in current_path.stat.lnk_source"
+ - "not current_path_unfinished_filename.stat.exists"
+- stat: path={{ deploy_helper.releases_path }}/third
+ register: third_release_path
+- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
+ register: releases_count
+- name: Assert State=finalize with default parameters (clean=true checks)
+ assert:
+ that:
+ - "not third_release_path.stat.exists"
+ - "releases_count.stdout|trim == '6'"
+- deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=query
+- name: Assert State=finalize with default parameters (previous_release checks)
+ assert:
+ that:
+ - "deploy_helper.new_release == deploy_helper.previous_release"
+
+- name: State=absent with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=absent
+- stat: path={{ deploy_helper_test_root }}
+ register: project_path
+- name: Assert State=absent with default parameters
+ assert:
+ that:
+ - "not project_path.stat.exists"
+
+- debug: msg="Clearing all release data and facts ---------"
+
+- name: State=present with shared_path set to False
+ deploy_helper: path={{ deploy_helper_test_root }} state=present shared_path=''
+- stat: path={{ deploy_helper.releases_path }}
+ register: releases_path
+- stat: path={{ deploy_helper.shared_path }}
+ register: shared_path
+- name: Assert State=present with shared_path set to False
+ assert:
+ that:
+ - "releases_path.stat.exists"
+ - "not shared_path.stat.exists"
+
+# Setup older releases for tests
+- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
+ with_items: ['first', 'second', 'third', 'fourth', 'fifth']
+# Setup the new release
+- file: path={{ deploy_helper.new_release_path }} state=directory
+# Add a buildfile, just like in a real deploy
+- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}
+# Add a buildfile, to an older deploy
+- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }}
+
+- shell: "ls {{ deploy_helper_test_root }}/releases | wc -l"
+ register: before_releases_count
+- name: State=clean with keep_releases=3
+ deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=clean keep_releases=3
+- stat: path={{ deploy_helper.releases_path }}/third
+ register: third_release_path
+- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
+ register: releases_count
+- name: Assert State=clean with keep_releases=3
+ assert:
+ that:
+ - "not third_release_path.stat.exists"
+ - "before_releases_count.stdout|trim == '6'"
+ - "releases_count.stdout|trim == '3'"
+
+# Remove the root folder
+- file: path={{ deploy_helper_test_root }} state=absent
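Read together, the assertions above pin down the layout deploy_helper maintains; a sketch of what `state=finalize` leaves on disk (the release name is illustrative; the module generates a timestamp when no release is supplied):

    $ ls -l deploy_helper_test_root/       # hypothetical listing
    current -> releases/20200228203750     # symlink, DEPLOY_UNFINISHED removed
    releases/                              # old releases pruned to keep_releases
    shared/                                # present unless shared_path was overridden to ''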
diff --git a/test/integration/targets/incidental_flatpak_remote/aliases b/test/integration/targets/incidental_flatpak_remote/aliases
new file mode 100644
index 0000000000..c550b0d779
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/aliases
@@ -0,0 +1,7 @@
+shippable/posix/incidental
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/rhel
+needs/root
diff --git a/test/integration/targets/incidental_flatpak_remote/meta/main.yml b/test/integration/targets/incidental_flatpak_remote/meta/main.yml
new file mode 100644
index 0000000000..a1c58bf1e7
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_flatpak_remote
diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml b/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml
new file mode 100644
index 0000000000..7ce89a8c15
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml
@@ -0,0 +1,101 @@
+# - Tests with absent flatpak remote -------------------------------------------
+
+# state=present
+
+- name: Test addition of absent flatpak remote (check mode)
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of absent flatpak remote test result (check mode)
+ assert:
+ that:
+ - "addition_result.changed == true"
+ msg: "Adding an absent flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of addition of absent flatpak remote (check mode)
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: double_addition_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of addition of absent flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - "double_addition_result.changed == true"
+ msg: |
+ Adding an absent flatpak remote a second time shall still mark module execution
+ as changed in check mode
+
+# state=absent
+
+- name: Test removal of absent flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of absent flatpak remote test result (check mode)
+ assert:
+ that:
+ - "removal_result.changed == false"
+ msg: "Removing an absent flatpak remote shall mark module execution as not changed"
+
+
+# - Tests with present flatpak remote -------------------------------------------
+
+# state=present
+
+- name: Test addition of present flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of present flatpak remote test result (check mode)
+ assert:
+ that:
+ - "addition_result.changed == false"
+ msg: "Adding a present flatpak remote shall mark module execution as not changed"
+
+# state=absent
+
+- name: Test removal of present flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: check-mode-test-remote
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of present flatpak remote test result (check mode)
+ assert:
+ that:
+ - "removal_result.changed == true"
+ msg: "Removing a present flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of removal of present flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-test-remote
+ state: absent
+ register: double_removal_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of removal of present flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - "double_removal_result.changed == true"
+ msg: |
+ Removing a present flatpak remote a second time shall still mark module execution
+ as changed in check mode
diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/main.yml b/test/integration/targets/incidental_flatpak_remote/tasks/main.yml
new file mode 100644
index 0000000000..9c3ec6d798
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/tasks/main.yml
@@ -0,0 +1,57 @@
+# (c) 2018, Alexander Bethke <oolongbrothers@gmx.net>
+# (c) 2018, Ansible Project
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- block:
+
+ - import_tasks: setup.yml
+ become: true
+
+ # executable override
+
+ - name: Test executable override
+ flatpak_remote:
+ name: irrelevant
+ remote: irrelevant
+ state: present
+ executable: nothing-that-exists
+ ignore_errors: true
+ register: executable_override_result
+
+ - name: Verify executable override test result
+ assert:
+ that:
+ - "executable_override_result.failed == true"
+ - "executable_override_result.changed == false"
+ msg: "Specifying non-existing executable shall fail module execution"
+
+ - import_tasks: check_mode.yml
+ become: false
+
+ - import_tasks: test.yml
+ become: false
+ vars:
+ method: user
+
+ - import_tasks: test.yml
+ become: true
+ vars:
+ method: system
+
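+ # Run only on Fedora, or on Ubuntu 16.04 and newer; note that 'and' binds
+ # tighter than 'or' in the expression below.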
+ when: |
+ ansible_distribution == 'Fedora' or
+ ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16
diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml b/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml
new file mode 100644
index 0000000000..b2fd276604
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml
@@ -0,0 +1,27 @@
+- name: Install flatpak on Fedora
+ dnf:
+ name: flatpak
+ state: present
+ when: ansible_distribution == 'Fedora'
+
+- block:
+ - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic
+ apt_repository:
+ repo: "ppa:alexlarsson/flatpak"
+ state: present
+ mode: 0644
+ when: ansible_lsb.major_release | int < 18
+
+ - name: Install flatpak package on Ubuntu
+ apt:
+ name: flatpak
+ state: present
+
+ when: ansible_distribution == 'Ubuntu'
+
+- name: Install flatpak remote for testing check mode
+ flatpak_remote:
+ name: check-mode-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/test.yml b/test/integration/targets/incidental_flatpak_remote/tasks/test.yml
new file mode 100644
index 0000000000..97a13f0cbd
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/tasks/test.yml
@@ -0,0 +1,72 @@
+# state=present
+
+- name: Test addition - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: addition_result
+
+- name: Verify addition test result - {{ method }}
+ assert:
+ that:
+ - "addition_result.changed == true"
+ msg: "state=preset shall add flatpak when absent"
+
+- name: Test idempotency of addition - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: double_addition_result
+
+- name: Verify idempotency of addition test result - {{ method }}
+ assert:
+ that:
+ - "double_addition_result.changed == false"
+ msg: "state=present shall not do anything when flatpak is already present"
+
+- name: Test updating remote url does not do anything - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: https://a.different/repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: url_update_result
+
+- name: Verify updating remote url does not do anything - {{ method }}
+ assert:
+ that:
+ - "url_update_result.changed == false"
+ msg: "Trying to update the URL of an existing flatpak remote shall not do anything"
+
+
+# state=absent
+
+- name: Test removal - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ method: "{{ method }}"
+ register: removal_result
+
+- name: Verify removal test result - {{ method }}
+ assert:
+ that:
+ - "removal_result.changed == true"
+ msg: "state=absent shall remove flatpak when present"
+
+- name: Test idempotency of removal - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ method: "{{ method }}"
+ register: double_removal_result
+
+- name: Verify idempotency of removal test result - {{ method }}
+ assert:
+ that:
+ - "double_removal_result.changed == false"
+ msg: "state=absent shall not do anything when flatpak is not present"
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/aliases b/test/integration/targets/incidental_inventory_docker_swarm/aliases
new file mode 100644
index 0000000000..cdb5961501
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/aliases
@@ -0,0 +1,11 @@
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make the docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system. On VMs, we restart the docker daemon
+ # after finishing the tests to minimize potential effects
+ # on other tests.
+needs/root
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml b/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml
new file mode 100644
index 0000000000..e8e6d55e5e
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml
@@ -0,0 +1,3 @@
+---
+plugin: docker_swarm
+docker_host: unix://var/run/docker.sock
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml b/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml
new file mode 100644
index 0000000000..e36bd00f93
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml
@@ -0,0 +1,5 @@
+---
+plugin: docker_swarm
+docker_host: unix://var/run/docker.sock
+verbose_output: no
+include_host_uri: yes
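+# verbose_output: no suppresses docker_swarm_node_attributes in hostvars, and
+# include_host_uri adds ansible_host_uri for each node; both effects are
+# asserted in playbooks/test_inventory_2.yml.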
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml b/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml
new file mode 100644
index 0000000000..569a453cfa
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - incidental_setup_docker
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml
new file mode 100644
index 0000000000..9cf87159c6
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: yes
+ tasks:
+ - name: Make sure swarm is removed
+ docker_swarm:
+ state: absent
+ force: yes
+
+ - name: Remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml
new file mode 100644
index 0000000000..d9f777327c
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml
@@ -0,0 +1,15 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ vars:
+ docker_skip_cleanup: yes
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: incidental_setup_docker
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml
new file mode 100644
index 0000000000..600a89b1da
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml
@@ -0,0 +1,58 @@
+---
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: no
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
+ # which are in fact just localhost in disguise. That's why we use ansible_playbook_python.
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ tasks:
+ - name: Check for groups
+ assert:
+ that:
+ - "groups.manager | length > 0"
+ - "groups.worker | length >= 0"
+ - "groups.leader | length == 1"
+ run_once: yes
+
+ - name: List manager group
+ debug:
+ var: groups.manager
+ run_once: yes
+
+ - name: List worker group
+ debug:
+ var: groups.worker
+ run_once: yes
+
+ - name: List leader group
+ debug:
+ var: groups.leader
+ run_once: yes
+
+ - name: Print ansible_host per host
+ debug:
+ var: ansible_host
+
+ - name: Make sure docker_swarm_node_attributes is available
+ assert:
+ that:
+ - docker_swarm_node_attributes is defined
+ - name: Print docker_swarm_node_attributes per host
+ debug:
+ var: docker_swarm_node_attributes
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml
new file mode 100644
index 0000000000..b2a794d35b
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml
@@ -0,0 +1,35 @@
+---
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: no
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
+ # which are in fact just localhost in disguise. That's why we use ansible_playbook_python.
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ tasks:
+ - name: Make sure docker_swarm_node_attributes is not available
+ assert:
+ that:
+ - docker_swarm_node_attributes is undefined
+ - name: Make sure ansible_host_uri is available
+ assert:
+ that:
+ - ansible_host_uri is defined
+ - name: Print ansible_host_uri
+ debug:
+ var: ansible_host_uri
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/runme.sh b/test/integration/targets/incidental_inventory_docker_swarm/runme.sh
new file mode 100755
index 0000000000..e2ba6869e8
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/runme.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+cleanup() {
+ echo "Cleanup"
+ ansible-playbook playbooks/swarm_cleanup.yml
+ echo "Done"
+ exit 0
+}
+
+trap cleanup INT TERM EXIT
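+# The EXIT trap runs the cleanup playbook even if a test step fails under
+# 'set -e', so the swarm and the installed docker packages are always removed.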
+
+echo "Setup"
+ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/swarm_setup.yml
+
+echo "Test docker_swarm inventory 1"
+ansible-playbook -i inventory_1.docker_swarm.yml playbooks/test_inventory_1.yml
+
+echo "Test docker_swarm inventory 2"
+ansible-playbook -i inventory_2.docker_swarm.yml playbooks/test_inventory_2.yml
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/aliases b/test/integration/targets/incidental_lookup_hashi_vault/aliases
new file mode 100644
index 0000000000..7e29c80ee2
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/aliases
@@ -0,0 +1,6 @@
+shippable/posix/incidental
+destructive
+needs/target/incidental_setup_openssl
+needs/file/test/lib/ansible_test/_data/requirements/constraints.txt
+skip/aix
+skip/python2.6
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml
new file mode 100644
index 0000000000..f1f6dd981d
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+vault_gen_path: 'gen/testproject'
+vault_kv1_path: 'kv1/testproject'
+vault_kv2_path: 'kv2/data/testproject'
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml
new file mode 100644
index 0000000000..63307728a3
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml
@@ -0,0 +1,21 @@
+- name: 'Create an approle policy'
+ shell: "echo '{{ policy }}' | {{ vault_cmd }} policy write approle-policy -"
+ vars:
+ policy: |
+ path "auth/approle/login" {
+ capabilities = [ "create", "read" ]
+ }
+
+- name: 'Enable the AppRole auth method'
+ command: '{{ vault_cmd }} auth enable approle'
+
+- name: 'Create a named role'
+ command: '{{ vault_cmd }} write auth/approle/role/test-role policies="test-policy,approle-policy"'
+
+- name: 'Fetch the RoleID of the AppRole'
+ command: '{{ vault_cmd }} read -field=role_id auth/approle/role/test-role/role-id'
+ register: role_id_cmd
+
+- name: 'Get a SecretID issued against the AppRole'
+ command: '{{ vault_cmd }} write -field=secret_id -f auth/approle/role/test-role/secret-id'
+ register: secret_id_cmd
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml
new file mode 100644
index 0000000000..44eb5ed18d
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml
@@ -0,0 +1,45 @@
+- vars:
+ role_id: '{{ role_id_cmd.stdout }}'
+ secret_id: '{{ secret_id_cmd.stdout }}'
+ block:
+ - name: 'Fetch secrets using "hashi_vault" lookup'
+ set_fact:
+ secret1: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret1 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
+ secret2: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
+
+ - name: 'Check secret values'
+ fail:
+ msg: 'unexpected secret values'
+ when: secret1['value'] != 'foo1' or secret2['value'] != 'foo2'
+
+ - name: 'Failure expected when erroneous credentials are used'
+ vars:
+ secret_wrong_cred: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=approle secret_id=toto role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_wrong_cred }})'
+ register: test_wrong_cred
+ ignore_errors: true
+
+ - name: 'Failure expected when unauthorized secret is read'
+ vars:
+ secret_unauthorized: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret3 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_unauthorized }})'
+ register: test_unauthorized
+ ignore_errors: true
+
+ - name: 'Failure expected when inexistent secret is read'
+ vars:
+ secret_inexistent: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret4 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_inexistent }})'
+ register: test_inexistent
+ ignore_errors: true
+
+ - name: 'Check expected failures'
+ assert:
+ msg: "an expected failure didn't occur"
+ that:
+ - test_wrong_cred is failed
+ - test_unauthorized is failed
+ - test_inexistent is failed
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml
new file mode 100644
index 0000000000..42fd0907f3
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml
@@ -0,0 +1,155 @@
+---
+- name: Install Hashi Vault on controlled node and test
+
+ vars:
+ vault_version: '0.11.0'
+ vault_uri: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/lookup_hashi_vault/vault_{{ vault_version }}_{{ ansible_system | lower }}_{{ vault_arch }}.zip'
+ vault_cmd: '{{ local_temp_dir }}/vault'
+
+ block:
+ - name: Create a local temporary directory
+ tempfile:
+ state: directory
+ register: tempfile_result
+
+ - set_fact:
+ local_temp_dir: '{{ tempfile_result.path }}'
+
+ - when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: Generate privatekey
+ openssl_privatekey:
+ path: '{{ local_temp_dir }}/privatekey.pem'
+
+ - name: Generate CSR
+ openssl_csr:
+ path: '{{ local_temp_dir }}/csr.csr'
+ privatekey_path: '{{ local_temp_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
+
+ - name: Generate selfsigned certificate
+ openssl_certificate:
+ path: '{{ local_temp_dir }}/cert.pem'
+ csr_path: '{{ local_temp_dir }}/csr.csr'
+ privatekey_path: '{{ local_temp_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
+ register: selfsigned_certificate
+
+ - name: 'Install unzip'
+ package:
+ name: unzip
+ when: ansible_distribution != "MacOSX" # unzip already installed
+
+ - assert:
+ # Linux: x86_64, FreeBSD: amd64
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+ - set_fact:
+ vault_arch: '386'
+ when: ansible_architecture == 'i386'
+ - set_fact:
+ vault_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
+
+ - name: 'Download vault binary'
+ unarchive:
+ src: '{{ vault_uri }}'
+ dest: '{{ local_temp_dir }}'
+ remote_src: true
+
+ - environment:
+ # used by vault command
+ VAULT_DEV_ROOT_TOKEN_ID: '47542cbc-6bf8-4fba-8eda-02e0a0d29a0a'
+ block:
+ - name: 'Create configuration file'
+ template:
+ src: vault_config.hcl.j2
+ dest: '{{ local_temp_dir }}/vault_config.hcl'
+
+ - name: 'Start vault service'
+ environment:
+ VAULT_ADDR: 'http://localhost:8200'
+ block:
+ - name: 'Start vault server (dev mode enabled)'
+ shell: 'nohup {{ vault_cmd }} server -dev -config {{ local_temp_dir }}/vault_config.hcl </dev/null >/dev/null 2>&1 &'
+
+ - name: 'Create generic secrets engine'
+ command: '{{ vault_cmd }} secrets enable -path=gen generic'
+
+ - name: 'Create KV v1 secrets engine'
+ command: '{{ vault_cmd }} secrets enable -path=kv1 -version=1 kv'
+
+ - name: 'Create KV v2 secrets engine'
+ command: '{{ vault_cmd }} secrets enable -path=kv2 -version=2 kv'
+
+ - name: 'Create a test policy'
+ shell: "echo '{{ policy }}' | {{ vault_cmd }} policy write test-policy -"
+ vars:
+ policy: |
+ path "{{ vault_gen_path }}/secret1" {
+ capabilities = ["read"]
+ }
+ path "{{ vault_gen_path }}/secret2" {
+ capabilities = ["read", "update"]
+ }
+ path "{{ vault_gen_path }}/secret3" {
+ capabilities = ["deny"]
+ }
+ path "{{ vault_kv1_path }}/secret1" {
+ capabilities = ["read"]
+ }
+ path "{{ vault_kv1_path }}/secret2" {
+ capabilities = ["read", "update"]
+ }
+ path "{{ vault_kv1_path }}/secret3" {
+ capabilities = ["deny"]
+ }
+ path "{{ vault_kv2_path }}/secret1" {
+ capabilities = ["read"]
+ }
+ path "{{ vault_kv2_path }}/secret2" {
+ capabilities = ["read", "update"]
+ }
+ path "{{ vault_kv2_path }}/secret3" {
+ capabilities = ["deny"]
+ }
+
+ - name: 'Create generic secrets'
+ command: '{{ vault_cmd }} write {{ vault_gen_path }}/secret{{ item }} value=foo{{ item }}'
+ loop: [1, 2, 3]
+
+ - name: 'Create KV v1 secrets'
+ command: '{{ vault_cmd }} kv put {{ vault_kv1_path }}/secret{{ item }} value=foo{{ item }}'
+ loop: [1, 2, 3]
+
+ - name: 'Create KV v2 secrets'
+ command: '{{ vault_cmd }} kv put {{ vault_kv2_path | regex_replace("/data") }}/secret{{ item }} value=foo{{ item }}'
+ loop: [1, 2, 3]
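+ # The lookup reads KV v2 secrets through API paths containing '/data' (see
+ # vault_kv2_path), but 'vault kv put' expects the logical path, hence the
+ # regex_replace above.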
+
+ - name: setup approle auth
+ import_tasks: approle_setup.yml
+ when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
+
+ - name: setup token auth
+ import_tasks: token_setup.yml
+
+ - import_tasks: tests.yml
+ vars:
+ auth_type: approle
+ when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
+
+ - import_tasks: tests.yml
+ vars:
+ auth_type: token
+
+ always:
+ - name: 'Kill vault process'
+ shell: "kill $(cat {{ local_temp_dir }}/vault.pid)"
+ ignore_errors: true
+
+ always:
+ - name: 'Delete temp dir'
+ file:
+ path: '{{ local_temp_dir }}'
+ state: absent
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml
new file mode 100644
index 0000000000..198f587a77
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml
@@ -0,0 +1,35 @@
+- name: 'test {{ auth_type }} auth without SSL (lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ vars:
+ conn_params: 'url=http://localhost:8200 '
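+ # conn_params is prepended verbatim to each lookup term, so the trailing
+ # space matters: it separates url=... from the parameters appended in the
+ # *_test.yml task files.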
+
+- name: 'test {{ auth_type }} auth without SSL (environment variable)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ args:
+ apply:
+ vars:
+ conn_params: ''
+ environment:
+ VAULT_ADDR: 'http://localhost:8200'
+
+- when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: 'test {{ auth_type }} auth with certs (validation enabled, lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ vars:
+ conn_params: 'url=https://localhost:8201 ca_cert={{ local_temp_dir }}/cert.pem validate_certs=True '
+
+ - name: 'test {{ auth_type }} auth with certs (validation enabled, environment variables)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ args:
+ apply:
+ vars:
+ conn_params: ''
+ environment:
+ VAULT_ADDR: 'https://localhost:8201'
+ VAULT_CACERT: '{{ local_temp_dir }}/cert.pem'
+
+ - name: 'test {{ auth_type }} auth with certs (validation disabled, lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ vars:
+ conn_params: 'url=https://localhost:8201 validate_certs=False '
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml
new file mode 100644
index 0000000000..d5ce280346
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml
@@ -0,0 +1,3 @@
+- name: 'Create test credentials (token)'
+ command: '{{ vault_cmd }} token create -policy test-policy -field token'
+ register: user_token_cmd
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml
new file mode 100644
index 0000000000..20c1af791e
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml
@@ -0,0 +1,58 @@
+- vars:
+ user_token: '{{ user_token_cmd.stdout }}'
+ block:
+ - name: 'Fetch secrets using "hashi_vault" lookup'
+ set_fact:
+ gen_secret1: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_gen_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
+ gen_secret2: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_gen_path ~ '/secret2 token=' ~ user_token) }}"
+ kv1_secret1: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv1_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
+ kv1_secret2: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv1_path ~ '/secret2 token=' ~ user_token) }}"
+ kv2_secret1: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
+ kv2_secret2: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 token=' ~ user_token) }}"
+
+ - name: 'Check secret generic values'
+ fail:
+ msg: 'unexpected secret values'
+ when: gen_secret1['value'] != 'foo1' or gen_secret2['value'] != 'foo2'
+
+ - name: 'Check secret kv1 values'
+ fail:
+ msg: 'unexpected secret values'
+ when: kv1_secret1['value'] != 'foo1' or kv1_secret2['value'] != 'foo2'
+
+ - name: 'Check secret kv2 values'
+ fail:
+ msg: 'unexpected secret values'
+ when: kv2_secret1['value'] != 'foo1' or kv2_secret2['value'] != 'foo2'
+
+ - name: 'Failure expected when erroneous credentials are used'
+ vars:
+ secret_wrong_cred: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=token token=wrong_token') }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_wrong_cred }})'
+ register: test_wrong_cred
+ ignore_errors: true
+
+ - name: 'Failure expected when unauthorized secret is read'
+ vars:
+ secret_unauthorized: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret3 token=' ~ user_token) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_unauthorized }})'
+ register: test_unauthorized
+ ignore_errors: true
+
+ - name: 'Failure expected when inexistent secret is read'
+ vars:
+ secret_inexistent: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret4 token=' ~ user_token) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_inexistent }})'
+ register: test_inexistent
+ ignore_errors: true
+
+ - name: 'Check expected failures'
+ assert:
+ msg: "an expected failure didn't occur"
+ that:
+ - test_wrong_cred is failed
+ - test_unauthorized is failed
+ - test_inexistent is failed
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j2 b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j2
new file mode 100644
index 0000000000..effc90ba90
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j2
@@ -0,0 +1,10 @@
+# {{ ansible_managed }}
+pid_file = "{{ local_temp_dir }}/vault.pid"
+{% if pyopenssl_version.stdout is version('0.15', '>=') %}
+listener "tcp" {
+ tls_key_file = "{{ local_temp_dir }}/privatekey.pem"
+ tls_cert_file = "{{ local_temp_dir }}/cert.pem"
+ tls_disable = false
+ address = "localhost:8201"
+}
+{% endif %}
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/playbooks/install_dependencies.yml b/test/integration/targets/incidental_lookup_hashi_vault/playbooks/install_dependencies.yml
new file mode 100644
index 0000000000..9edbdbd631
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/playbooks/install_dependencies.yml
@@ -0,0 +1,19 @@
+- hosts: localhost
+ tasks:
+ - name: Install openssl
+ import_role:
+ name: incidental_setup_openssl
+
+ - name: "RedHat <= 7, select last version compatible with request 2.6.0 (this version doesn't support approle auth)"
+ set_fact:
+ hvac_package: 'hvac==0.2.5'
+ when: ansible_distribution == 'RedHat' and ansible_distribution_major_version is version('7', '<=')
+
+ - name: 'CentOS < 7, select the last hvac version compatible with Python 2.6'
+ set_fact:
+ hvac_package: 'hvac==0.5.0'
+ when: ansible_distribution == 'CentOS' and ansible_distribution_major_version is version('7', '<')
+
+ - name: 'Install hvac Python package'
+ pip:
+ name: "{{ hvac_package|default('hvac') }}"
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml b/test/integration/targets/incidental_lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml
new file mode 100644
index 0000000000..343763af09
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml
@@ -0,0 +1,9 @@
+- hosts: localhost
+ tasks:
+ - name: Register pyOpenSSL version
+ command: "{{ ansible_python.executable }} -c 'import OpenSSL; print(OpenSSL.__version__)'"
+ register: pyopenssl_version
+
+ - name: Test lookup hashi_vault
+ import_role:
+ name: incidental_lookup_hashi_vault/lookup_hashi_vault
diff --git a/test/integration/targets/incidental_lookup_hashi_vault/runme.sh b/test/integration/targets/incidental_lookup_hashi_vault/runme.sh
new file mode 100755
index 0000000000..e5e0df347f
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_hashi_vault/runme.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# First install pyOpenSSL, then test lookup in a second playbook in order to
+# work around this error, which occurs on OS X 10.11 only:
+#
+# TASK [lookup_hashi_vault : test token auth with certs (validation enabled, lookup parameters)] ***
+# included: lookup_hashi_vault/tasks/token_test.yml for testhost
+#
+# TASK [lookup_hashi_vault : Fetch secrets using "hashi_vault" lookup] ***
+# From cffi callback <function _verify_callback at 0x106f995f0>:
+# Traceback (most recent call last):
+# File "/usr/local/lib/python2.7/site-packages/OpenSSL/SSL.py", line 309, in wrapper
+# _lib.X509_up_ref(x509)
+# AttributeError: 'module' object has no attribute 'X509_up_ref'
+# fatal: [testhost]: FAILED! => { "msg": "An unhandled exception occurred while running the lookup plugin 'hashi_vault'. Error was a <class 'requests.exceptions.SSLError'>, original message: HTTPSConnectionPool(host='localhost', port=8201): Max retries exceeded with url: /v1/auth/token/lookup-self (Caused by SSLError(SSLError(\"bad handshake: Error([('SSL routines', 'ssl3_get_server_certificate', 'certificate verify failed')],)\",),))"}
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook playbooks/install_dependencies.yml -v "$@"
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook playbooks/test_lookup_hashi_vault.yml -v "$@"
diff --git a/test/integration/targets/incidental_lookup_rabbitmq/aliases b/test/integration/targets/incidental_lookup_rabbitmq/aliases
new file mode 100644
index 0000000000..f89752b833
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_rabbitmq/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/freebsd
+skip/rhel
diff --git a/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml b/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml
new file mode 100644
index 0000000000..33fa97dc9e
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_rabbitmq
diff --git a/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml b/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml
new file mode 100644
index 0000000000..740f899805
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml
@@ -0,0 +1,5 @@
+# Rabbitmq lookup
+- include: ubuntu.yml
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_release != 'trusty'
diff --git a/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml b/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml
new file mode 100644
index 0000000000..3b007edecc
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml
@@ -0,0 +1,138 @@
+- name: Test failure without pika installed
+ set_fact:
+ rabbit_missing_pika: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.250.1:5672/%2F', queue='hello', count=3) }}"
+ ignore_errors: yes
+ register: rabbitmq_missing_pika_error
+
+- assert:
+ that:
+ - "'pika python package is required' in rabbitmq_missing_pika_error.msg"
+
+- name: Install pika and requests
+ pip:
+ name: pika<1.0.0,requests
+ state: latest
+
+- name: Test that giving an incorrect amqp protocol in URL will error
+ set_fact:
+ rabbitmq_test_protocol: "{{ lookup('rabbitmq', url='zzzamqp://guest:guest@192.168.250.1:5672/%2F', queue='hello', count=3) }}"
+ ignore_errors: yes
+ register: rabbitmq_protocol_error
+
+- assert:
+ that:
+ - "rabbitmq_protocol_error is failed"
+ - "'URL malformed' in rabbitmq_protocol_error.msg"
+
+- name: Test that giving an incorrect IP address in URL will error
+ set_fact:
+ rabbitmq_test_protocol: "{{ lookup('rabbitmq', url='amqp://guest:guest@xxxxx192.112312368.250.1:5672/%2F', queue='hello', count=3) }}"
+ ignore_errors: yes
+ register: rabbitmq_ip_error
+
+- assert:
+ that:
+ - "rabbitmq_ip_error is failed"
+ - "'Connection issue' in rabbitmq_ip_error.msg"
+
+- name: Test missing parameters will error
+ set_fact:
+ rabbitmq_test_protocol: "{{ lookup('rabbitmq') }}"
+ ignore_errors: yes
+ register: rabbitmq_params_error
+
+- assert:
+ that:
+ - "rabbitmq_params_error is failed"
+ - "'URL is required for rabbitmq lookup.' in rabbitmq_params_error.msg"
+
+- name: Test missing queue will error
+ set_fact:
+ rabbitmq_queue_protocol: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.250.1:5672/%2F') }}"
+ ignore_errors: yes
+ register: rabbitmq_queue_error
+
+- assert:
+ that:
+ - "rabbitmq_queue_error is failed"
+ - "'Queue is required for rabbitmq lookup' in rabbitmq_queue_error.msg"
+
+- name: Enable the rabbitmq_management plugin
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ state: enabled
+
+- name: Setup test queue
+ rabbitmq_queue:
+ name: hello
+
+- name: Post test message to the exchange (string)
+ uri:
+ url: http://localhost:15672/api/exchanges/%2f/amq.default/publish
+ method: POST
+ body: '{"properties":{},"routing_key":"hello","payload":"ansible-test","payload_encoding":"string"}'
+ user: guest
+ password: guest
+ force_basic_auth: yes
+ return_content: yes
+ headers:
+ Content-Type: "application/json"
+ register: post_data
+
+
+- name: Post test message to the exchange (json)
+ uri:
+ url: http://localhost:15672/api/exchanges/%2f/amq.default/publish
+ method: POST
+ body: '{"properties":{"content_type": "application/json"},"routing_key":"hello","payload":"{\"key\": \"value\" }","payload_encoding":"string"}'
+ user: guest
+ password: guest
+ force_basic_auth: yes
+ return_content: yes
+ headers:
+ Content-Type: "application/json"
+ register: post_data_json
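+# Both POSTs publish through the default exchange, which routes on the
+# routing_key, so the two messages land in the 'hello' queue created above.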
+
+- name: Test retrieve messages
+ set_fact:
+ rabbitmq_msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@localhost:5672/%2f/hello', queue='hello') }}"
+ ignore_errors: yes
+ register: rabbitmq_msg_error
+
+- name: Ensure two messages were received
+ assert:
+ that:
+ - "rabbitmq_msg_error is not failed"
+ - rabbitmq_msg | length == 2
+
+- name: Ensure first message is a string
+ assert:
+ that:
+ - rabbitmq_msg[0].msg == "ansible-test"
+
+- name: Ensure second message is json
+ assert:
+ that:
+ - rabbitmq_msg[1].json.key == "value"
+
+- name: Test missing vhost
+ set_fact:
+ rabbitmq_msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@localhost:5672/missing/', queue='hello') }}"
+ ignore_errors: yes
+ register: rabbitmq_vhost_error
+
+- assert:
+ that:
+ - "rabbitmq_vhost_error is failed"
+ - "'NOT_ALLOWED' in rabbitmq_vhost_error.msg"
+
+# Tidy up
+- name: Uninstall pika and requests
+ pip:
+ name: pika,requests
+ state: absent
+
+- name: Disable the rabbitmq_management plugin
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ state: disabled
diff --git a/test/integration/targets/incidental_lvg/aliases b/test/integration/targets/incidental_lvg/aliases
new file mode 100644
index 0000000000..d5baa06d53
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/aliases
@@ -0,0 +1,6 @@
+destructive
+needs/privileged
+shippable/posix/incidental
+skip/aix
+skip/freebsd
+skip/osx
diff --git a/test/integration/targets/incidental_lvg/meta/main.yml b/test/integration/targets/incidental_lvg/meta/main.yml
new file mode 100644
index 0000000000..1810d4bec9
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_lvg/tasks/main.yml b/test/integration/targets/incidental_lvg/tasks/main.yml
new file mode 100644
index 0000000000..a57f591bf0
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/main.yml
@@ -0,0 +1,15 @@
+- name: Install required packages (Linux)
+ package:
+ name: lvm2
+ state: present
+ when: ansible_system == 'Linux'
+
+- name: Test lvg module
+ block:
+ - import_tasks: setup.yml
+
+ - import_tasks: test_idempotency.yml
+
+ - import_tasks: test_grow_reduce.yml
+ always:
+ - import_tasks: teardown.yml
diff --git a/test/integration/targets/incidental_lvg/tasks/setup.yml b/test/integration/targets/incidental_lvg/tasks/setup.yml
new file mode 100644
index 0000000000..e63c2d641e
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/setup.yml
@@ -0,0 +1,13 @@
+- name: "Create files to use as a disk devices"
+ command: "dd if=/dev/zero of={{ remote_tmp_dir }}/img{{ item }} bs=1M count=10"
+ with_sequence: 'count=2'
+
+- name: "Create loop device for file"
+ command: "losetup --show -f {{ remote_tmp_dir }}/img{{ item }}"
+ with_sequence: 'count=2'
+ register: loop_devices
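+# 'losetup --show -f' prints the device it allocates (for example /dev/loop0),
+# so each result's stdout holds the device path used by the tests below.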
+
+- name: "Affect name on disk to work on"
+ set_fact:
+ loop_device1: "{{ loop_devices.results[0] }}"
+ loop_device2: "{{ loop_devices.results[1] }}"
diff --git a/test/integration/targets/incidental_lvg/tasks/teardown.yml b/test/integration/targets/incidental_lvg/tasks/teardown.yml
new file mode 100644
index 0000000000..ed662f1e1f
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/teardown.yml
@@ -0,0 +1,17 @@
+- name: Remove test volume group
+ lvg:
+ vg: testvg
+ state: absent
+
+- name: Detach loop device
+ command: "losetup -d {{ item.stdout }}"
+ loop: "{{ loop_devices.results|default([]) }}"
+ when:
+ - item.stdout is defined
+ - item.stdout is match("/dev/.*")
+
+- name: Remove device files
+ file:
+ path: "{{ remote_tmp_dir }}/img{{ item }}"
+ state: absent
+ with_sequence: 'count={{ loop_devices.results|length }}'
diff --git a/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml b/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml
new file mode 100644
index 0000000000..1e98804538
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml
@@ -0,0 +1,33 @@
+- name: "Create volume group on first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+
+- name: "get lvm facts"
+ setup:
+
+- debug: var=ansible_lvm
+
+- name: "Assert the testvg span only on first disk"
+ assert:
+ that:
+ - ansible_lvm.pvs[loop_device1.stdout].vg == "testvg"
+ - 'loop_device2.stdout not in ansible_lvm.pvs or
+ ansible_lvm.pvs[loop_device2.stdout].vg == ""'
+
+- name: "Extend to second disk AND reduce from the first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device2.stdout }}"
+
+- name: "get lvm facts"
+ setup:
+
+- debug: var=ansible_lvm
+
+- name: "Assert the testvg span only on first disk"
+ assert:
+ that:
+ - 'loop_device1.stdout not in ansible_lvm.pvs or
+ ansible_lvm.pvs[loop_device1.stdout].vg == ""'
+ - ansible_lvm.pvs[loop_device2.stdout].vg == "testvg"
diff --git a/test/integration/targets/incidental_lvg/tasks/test_idempotency.yml b/test/integration/targets/incidental_lvg/tasks/test_idempotency.yml
new file mode 100644
index 0000000000..5007e56a5b
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/test_idempotency.yml
@@ -0,0 +1,15 @@
+- name: Create volume group on disk device
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+
+- name: Create the volume group again to verify idempotence
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+ register: repeat_vg_create
+
+- name: Do all assertions to verify expected results
+ assert:
+ that:
+ - repeat_vg_create is not changed
diff --git a/test/integration/targets/incidental_mongodb_parameter/aliases b/test/integration/targets/incidental_mongodb_parameter/aliases
new file mode 100644
index 0000000000..45a1ea497d
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/freebsd
+skip/rhel
+needs/root
diff --git a/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml b/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml
new file mode 100644
index 0000000000..aac55526df
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+# defaults file for test_mongodb_user
+mongodb_admin_user: test_root
+mongodb_admin_password: saE_Rr9!gE6gh#e~R#nZ
+mongod_auth: false
+kill_signal: SIGTERM
+# Should be one of
+# --storageEngine wiredTiger --wiredTigerEngineConfigString="cache_size=200M"
+# --storageEngine mmapv1 --nojournal
+mongod_storage_engine_opts: "--storageEngine wiredTiger --wiredTigerEngineConfigString='cache_size=200M'"
+mongodb_user: mongodb
+mongodb_user_list:
+ - { "name": "user1", "password": "password1", "roles": "read", "database": "test" }
+ - { "name": "user2", "password": "password2", "roles": "readWrite", "database": "test" }
+ - { "name": "user3", "password": "password3", "roles": "dbAdmin", "database": "test" }
+ - { "name": "user4", "password": "password4", "roles": "userAdmin", "database": "test" }
+ - { "name": "user5", "password": "password5", "roles": "clusterAdmin", "database": "admin" }
+ - { "name": "user6", "password": "password6", "roles": "readAnyDatabase", "database": "admin" }
+ - { "name": "user7", "password": "password7", "roles": "readWriteAnyDatabase", "database": "admin" }
+ - { "name": "user8", "password": "password8", "roles": "userAdminAnyDatabase", "database": "admin" }
+ - { "name": "user9", "password": "password9", "roles": "dbAdminAnyDatabase", "database": "admin" }
diff --git a/test/integration/targets/incidental_mongodb_parameter/meta/main.yml b/test/integration/targets/incidental_mongodb_parameter/meta/main.yml
new file mode 100644
index 0000000000..10fc3936a3
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - incidental_setup_mongodb
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml
new file mode 100644
index 0000000000..a0fda1dc60
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml
@@ -0,0 +1,143 @@
+# test code for the mongodb_parameter module
+# (c) 2019, Rhys Campbell <rhys.james.campbell@googlemail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+- name: Ensure tests home exists
+ file:
+ path: "{{ remote_tmp_dir }}/tests"
+ state: directory
+
+- include_tasks: mongod_teardown.yml
+
+- include_tasks: mongod_singlenode.yml
+
+- name: Set syncdelay to 99
+ mongodb_parameter:
+ login_port: 3001
+ param: syncdelay
+ value: 99
+ param_type: int
+ register: sd_change
+
+- assert:
+ that:
+ - sd_change.before | int == 60
+ - sd_change.after | int == 99
+ - sd_change.changed == True
+
+- name: Set syncdelay to 99 (again)
+ mongodb_parameter:
+ login_port: 3001
+ param: syncdelay
+ value: 99
+ param_type: int
+ register: sd_change
+
+- assert:
+ that:
+ - sd_change.before | int == 99
+ - sd_change.after | int == 99
+ - sd_change.changed == False
+
+- name: Create admin user with module
+ mongodb_user:
+ login_port: 3001
+ database: admin
+ name: "{{ mongodb_admin_user }}"
+ password: "{{ mongodb_admin_password }}"
+ roles: root
+ state: present
+ register: mongodb_admin_user_created
+
+- assert:
+ that:
+ - mongodb_admin_user_created.changed == True
+
+- name: Kill all mongod processes
+ command: pkill -{{ kill_signal }} mongod
+ ignore_errors: true
+
+- name: Getting pids for mongod
+ pids:
+ name: mongod
+ register: pids_of_mongod
+
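+# A pid's /proc/<pid>/status entry disappears once the process has exited, so
+# this blocks until every mongod found above is gone.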
+- name: Wait for all mongod processes to exit
+ wait_for:
+ path: "/proc/{{ item }}/status"
+ state: absent
+ delay: 3
+ with_items: "{{ pids_of_mongod }}"
+
+- set_fact:
+ mongod_auth: true
+
+- include_tasks: mongod_singlenode.yml
+# Tests with auth enabled
+
+- name: Set syncdelay to 59 with auth
+ mongodb_parameter:
+ login_port: 3001
+ login_user: "{{ mongodb_admin_user }}"
+ login_password: "{{ mongodb_admin_password }}"
+ param: syncdelay
+ value: 59
+ param_type: int
+ register: sd_change
+
+- assert:
+ that:
+ - sd_change.before | int == 60
+ - sd_change.after | int == 59
+ - sd_change.changed == True
+
+- name: Set syncdelay to 59 (again) with auth
+ mongodb_parameter:
+ login_port: 3001
+ login_user: "{{ mongodb_admin_user }}"
+ login_password: "{{ mongodb_admin_password }}"
+ param: syncdelay
+ value: 59
+ param_type: int
+ register: sd_change
+
+- assert:
+ that:
+ - sd_change.before | int == 59
+ - sd_change.after | int == 59
+ - sd_change.changed == False
+
+- name: Set authenticationMechanisms to MONGODB-X509 with auth (will fail)
+ mongodb_parameter:
+ login_port: 3001
+ login_user: "{{ mongodb_admin_user }}"
+ login_password: "{{ mongodb_admin_password }}"
+ param: authenticationMechanisms
+ value: "MONGODB-X509"
+ param_type: str
+ register: diag_change
+ ignore_errors: yes
+
+- assert:
+ that:
+ - '"unable to change parameter" in diag_change.msg'
+ - diag_change.failed == True
+
+# Clean up
+- include_tasks: mongod_teardown.yml
diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml
new file mode 100644
index 0000000000..291cb1c980
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml
@@ -0,0 +1,55 @@
+- name: Set mongodb_user for RedHat
+ set_fact:
+ mongodb_user: "mongod"
+ when: ansible_os_family == "RedHat"
+
+- set_fact:
+ mongodb_nodes:
+ - 3001
+
+- name: Create directories for mongod processes
+ file:
+ path: "{{ remote_tmp_dir }}/mongod{{ item }}"
+ state: directory
+ owner: "{{ mongodb_user }}"
+ group: "{{ mongodb_user }}"
+ mode: 0755
+ recurse: yes
+ with_items: "{{ mongodb_nodes }}"
+
+- name: Ensure {{ remote_tmp_dir }}/config dir exists
+ file:
+ path: "{{ remote_tmp_dir }}/config"
+ state: directory
+ owner: "{{ mongodb_user }}"
+ group: "{{ mongodb_user }}"
+ mode: 0755
+
+- name: Create keyfile
+ copy:
+ dest: "{{ remote_tmp_dir }}/my.key"
+ content: |
+ fd2CUrbXBJpB4rt74A6F
+ owner: "{{ mongodb_user }}"
+ group: "{{ mongodb_user }}"
+ mode: 0600
+ when: mongod_auth == True
+
+- name: Spawn mongod process without auth
+ command: mongod --shardsvr --smallfiles {{ mongod_storage_engine_opts }} --dbpath mongod{{ item }} --port {{ item }} --logpath mongod{{ item }}/log.log --fork
+ args:
+ chdir: "{{ remote_tmp_dir }}"
+ with_items: "{{ mongodb_nodes | sort }}"
+ when: mongod_auth == False
+
+- name: Spawn mongod process with auth
+ command: mongod --shardsvr --smallfiles {{ mongod_storage_engine_opts }} --dbpath mongod{{ item }} --port {{ item }} --logpath mongod{{ item }}/log.log --fork --auth --keyFile my.key
+ args:
+ chdir: "{{ remote_tmp_dir }}"
+ with_items: "{{ mongodb_nodes | sort }}"
+ when: mongod_auth == True
+
+- name: Wait for mongod to start responding
+ wait_for:
+ port: "{{ item }}"
+ with_items: "{{ mongodb_nodes }}"
diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml
new file mode 100644
index 0000000000..a904a718b2
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml
@@ -0,0 +1,25 @@
+- name: Kill all mongod processes
+ command: pkill -{{ kill_signal }} mongod
+ ignore_errors: true
+
+- name: Getting pids for mongod
+ pids:
+ name: mongod
+ register: pids_of_mongod
+
+- name: Wait for all mongod processes to exit
+ wait_for:
+ path: "/proc/{{ item }}/status"
+ state: absent
+ delay: 1
+ with_items: "{{ pids_of_mongod }}"
+
+- name: Remove all mongod folders
+ file:
+ path: "{{ remote_tmp_dir }}/{{ item }}"
+ state: absent
+ with_items:
+ - mongod3001
+
+- name: Remove all mongod sock files
+ shell: rm -Rf /tmp/mongodb*.sock
diff --git a/test/integration/targets/incidental_postgresql_user/aliases b/test/integration/targets/incidental_postgresql_user/aliases
new file mode 100644
index 0000000000..78b47900ab
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
diff --git a/test/integration/targets/incidental_postgresql_user/defaults/main.yml b/test/integration/targets/incidental_postgresql_user/defaults/main.yml
new file mode 100644
index 0000000000..bc9ef19b93
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/defaults/main.yml
@@ -0,0 +1,3 @@
+db_name: 'ansible_db'
+db_user1: 'ansible_db_user1'
+db_user2: 'ansible_db_user2'
diff --git a/test/integration/targets/incidental_postgresql_user/meta/main.yml b/test/integration/targets/incidental_postgresql_user/meta/main.yml
new file mode 100644
index 0000000000..c2a0d561d1
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_postgresql_db
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/main.yml b/test/integration/targets/incidental_postgresql_user/tasks/main.yml
new file mode 100644
index 0000000000..d59ae63502
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/main.yml
@@ -0,0 +1,7 @@
+# Initial CI tests of postgresql_user module
+- import_tasks: postgresql_user_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# General tests:
+- import_tasks: postgresql_user_general.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml
new file mode 100644
index 0000000000..963f58ac1a
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml
@@ -0,0 +1,741 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Integration tests for postgresql_user module.
+
+- vars:
+ test_user: hello.user.with.dots
+ test_user2: hello
+ test_group1: group1
+ test_group2: group2
+ test_table: test
+ test_comment1: 'comment1'
+ test_comment2: 'comment2'
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
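+ # The '&task_parameters' and '&pg_parameters' anchors are merged into each
+ # task below via '<<:', avoiding repetition of the become/login options.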
+
+ block:
+ #
+ # Common tests
+ #
+ - name: Create role in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create role in actual mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Add a comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment1 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment1 }}'"]
+
+ - name: check the comment
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment
+ FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0].comment == '{{ test_comment1 }}'
+
+ - name: Try to add the same comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment1 }}'
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Try to add another comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment2 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment2 }}'"]
+
+ - name: check the comment
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment
+ FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0].comment == '{{ test_comment2 }}'
+
+ - name: Try to create role again in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to create role again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop role in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user actually exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop role in actual mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Try to drop role in check mode again
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Try to drop role in actual mode again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ #
+ # password, no_password_changes, encrypted, expires parameters
+ #
+
+ - name: Create role with password, passed as hashed md5
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: md59543f1d82624df2b31672ec0f7050460
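+ # PostgreSQL md5 passwords have the form 'md5' + md5(password + username);
+ # passing the hash directly keeps the plaintext out of the play.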
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that the user exists with a proper password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Test no_password_changes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: u123
+ no_password_changes: yes
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+
+ - name: Check that nothing changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Storing unencrypted passwords is not supported since PostgreSQL 10
+ - name: Change password, passed as unencrypted
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: myunencryptedpass
+ encrypted: no
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - name: Check that the user exists with the unencrypted password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'myunencryptedpass'"
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - name: Change password, explicit encrypted=yes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: myunencryptedpass
+ encrypted: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that the user exists with an encrypted password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword != 'myunencryptedpass'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
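+ # 'expires' maps to the role's VALID UNTIL attribute (rolvaliduntil in pg_authid)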
+ - name: Change rolvaliduntil attribute
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ expires: 'Jan 31 2020'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolvaliduntil::text like '2020-01-31%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to set the same rolvaliduntil value again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ expires: 'Jan 31 2020'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that nothing changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolvaliduntil::text like '2020-01-31%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ #
+ # role_attr_flags
+ #
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: CREATEROLE,CREATEDB
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 't' and rolcreatedb = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set the same role attributes again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: CREATEROLE,CREATEDB
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 't' and rolcreatedb = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: NOCREATEROLE,NOCREATEDB
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 'f' and rolcreatedb = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: NOCREATEROLE,NOCREATEDB
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 'f' and rolcreatedb = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ #
+ # priv
+ #
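+ # 'priv' entries are separated by '/': database privileges are given as a plain
+ # list (e.g. 'CONNECT') and table privileges as 'tablename:PRIV1,PRIV2'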
+ - name: Create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ columns:
+ - id int
+
+ - name: Insert data to test table
+ <<: *task_parameters
+ postgresql_query:
+ query: "INSERT INTO {{ test_table }} (id) VALUES ('1')"
+ <<: *pg_parameters
+
+ - name: Check that test_user is not allowed to read the data
+ <<: *task_parameters
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ session_role: '{{ test_user }}'
+ query: 'SELECT * FROM {{ test_table }}'
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - "'permission denied' in result.msg"
+
+ - name: Grant privileges
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ priv: '{{ test_table }}:SELECT'
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Check that test_user is allowed to read the data
+ <<: *task_parameters
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ session_role: '{{ test_user }}'
+ query: 'SELECT * FROM {{ test_table }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant the same privileges again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ priv: '{{ test_table }}:SELECT'
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Remove test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ #
+ # fail_on_user
+ #
+ - name: Create role for test
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user2 }}'
+
+ - name: Create test table, set owner as test_user2
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ owner: '{{ test_user2 }}'
+
+ - name: Test fail_on_user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user2 }}'
+ state: absent
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'Unable to remove user'
+
+ - name: 'Test fail_on_user: no'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ fail_on_user: no
+
+ - assert:
+ that:
+ - result is not changed
+
+ #
+ # Test groups parameter
+ #
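+ # 'groups' grants each listed role to the user, i.e. GRANT "group" TO "user",
+ # which is exactly what the queries asserted below expect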
+ - name: Create test group
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+
+ - name: Create role test_group1 and grant test_group2 to test_group1 in check_mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_group1 }}'
+ - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"']
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create role test_group1 and grant test_group2 to test_group1
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_group1 }}'
+ - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"']
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant test_group2 to test_group1 again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_group1 }}'
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant groups to existent role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ groups:
+ - '{{ test_group1 }}'
+ - '{{ test_group2 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+ - result.queries == ['GRANT "{{ test_group1 }}" TO "{{ test_user }}"', 'GRANT "{{ test_group2 }}" TO "{{ test_user }}"']
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_group WHERE groname in ('{{ test_group1 }}', '{{ test_group2 }}') AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ always:
+ #
+ # Clean up
+ #
+ - name: Drop test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ - name: Drop test user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ test_user }}'
+ - '{{ test_user2 }}'
+ - '{{ test_group1 }}'
+ - '{{ test_group2 }}'
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml
new file mode 100644
index 0000000000..ccd42847c6
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml
@@ -0,0 +1,153 @@
+#
+# Create and destroy user, test 'password' and 'encrypted' parameters
+#
+# unencrypted values are not supported on newer versions
+# do not run the encrypted: no tests if on 10+
+- set_fact:
+ encryption_values:
+ - 'yes'
+
+- set_fact:
+ encryption_values: '{{ encryption_values + ["no"]}}'
+ when: postgres_version_resp.stdout is version('10', '<')
+
+- include_tasks: test_password.yml
+ vars:
+ encrypted: '{{ loop_item }}'
+ db_password1: 'secretù' # use UTF-8
+ loop: '{{ encryption_values }}'
+ loop_control:
+ loop_var: loop_item
+
+# BYPASSRLS role attribute was introduced in PostgreSQL 9.5, so
+# we want to test attribute management differently depending
+# on the version.
+- set_fact:
+ bypassrls_supported: "{{ postgres_version_resp.stdout is version('9.5.0', '>=') }}"
+
+# test 'no_password_changes' and 'role_attr_flags' parameters
+- include_tasks: test_no_password_change.yml
+ vars:
+ no_password_changes: '{{ loop_item }}'
+ loop:
+ - 'yes'
+ - 'no'
+ loop_control:
+ loop_var: loop_item
+
+### TODO: fail_on_user
+
+#
+# Test login_user functionality
+#
+- name: Create a user to test login module parameters
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: "present"
+ encrypted: 'yes'
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create db
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that the database was created
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Create a user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: "present"
+ encrypted: 'yes'
+ password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that it was created
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Grant database privileges
+ postgresql_privs:
+ type: "database"
+ state: "present"
+ roles: "{{ db_user2 }}"
+ privs: "CREATE,connect"
+ objs: "{{ db_name }}"
+ db: "{{ db_name }}"
+ login: "{{ db_user1 }}"
+ password: "password"
+ host: "localhost"
+
+- name: Check that the user has the requested permissions (database)
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- assert:
+ that:
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "db_user2 ~ '=Cc' in result_database.stdout"
+
+- name: Remove user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: 'absent'
+ priv: "ALL"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that the user was removed
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Destroy DB
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that database was destroyed
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml b/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml
new file mode 100644
index 0000000000..c296c0ea77
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml
@@ -0,0 +1,167 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: "{{ pg_user }}"
+ become: yes
+ register: result
+ postgresql_parameters: &parameters
+ db: postgres
+ name: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+ block:
+
+ - name: Create a user with all role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "SUPERUSER,CREATEROLE,CREATEDB,INHERIT,LOGIN{{ bypassrls_supported | ternary(',BYPASSRLS', '') }}"
+ no_password_changes: '{{ no_password_changes }}' # no_password_changes is ignored when user doesn't already exist
+
+ - name: Check that the user has the requested role attributes
+ <<: *task_parameters
+ shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin {{ bypassrls_supported | ternary(\", 'bypassrls:'||rolbypassrls\", '') }} from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:t' in result.stdout_lines[-2]"
+ - "'createrole:t' in result.stdout_lines[-2]"
+ - "'create:t' in result.stdout_lines[-2]"
+ - "'inherit:t' in result.stdout_lines[-2]"
+ - "'login:t' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check that the user has the requested role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "not bypassrls_supported or 'bypassrls:t' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Modify a user to have no role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }}"
+ no_password_changes: '{{ no_password_changes }}'
+
+ - name: Check that ansible reports it modified the role
+ assert:
+ that:
+ - result is changed
+
+ - name: "Check that the user doesn't have any attribute"
+ <<: *task_parameters
+ shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:f' in result.stdout_lines[-2]"
+ - "'createrole:f' in result.stdout_lines[-2]"
+ - "'create:f' in result.stdout_lines[-2]"
+ - "'inherit:f' in result.stdout_lines[-2]"
+ - "'login:f' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check that the user has the requested role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "not bypassrls_supported or 'bypassrls:f' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Try to add an invalid attribute
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }},INVALID"
+ no_password_changes: '{{ no_password_changes }}'
+ ignore_errors: yes
+
+ - name: Check that ansible reports failure
+ assert:
+ that:
+ - result is not changed
+ - result is failed
+ - "result.msg == 'Invalid role_attr_flags specified: INVALID'"
+
+ - name: Modify a single role attribute on a user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "LOGIN"
+ no_password_changes: '{{ no_password_changes }}'
+
+ - name: Check that ansible reports it modified the role
+ assert:
+ that:
+ - result is changed
+
+ - name: Check the role attributes
+ <<: *task_parameters
+ shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:f' in result.stdout_lines[-2]"
+ - "'createrole:f' in result.stdout_lines[-2]"
+ - "'create:f' in result.stdout_lines[-2]"
+ - "'inherit:f' in result.stdout_lines[-2]"
+ - "'login:t' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check the role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: echo "select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "( postgres_version_resp.stdout is version('9.5.0', '<')) or 'bypassrls:f' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Check that using the same attribute a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "LOGIN"
+ no_password_changes: '{{ no_password_changes }}'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - name: Check there isn't any update reported
+ assert:
+ that:
+ - result is not changed
+
+ - name: Cleanup the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: 'absent'
+ no_password_changes: '{{ no_password_changes }}' # user deletion: no_password_changes is ignored
+
+ - name: Check that user was removed
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+ always:
+ - name: Cleanup the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: 'absent'
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml b/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml
new file mode 100644
index 0000000000..be033a5569
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml
@@ -0,0 +1,336 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: "{{ pg_user }}"
+ become: yes
+ register: result
+ postgresql_parameters: &parameters
+ db: postgres
+ name: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
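+ # PGOPTIONS='-c default_transaction_read_only=on' makes every write statement
+ # fail, so a task run with it only succeeds when the module issues no
+ # ALTER/CREATE statement at all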
+ block:
+ - name: 'Check that PGOPTIONS environment variable is effective (1/2)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ ignore_errors: true
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - name: 'Check that PGOPTIONS environment variable is effective (2/2)'
+ assert:
+ that:
+ - "{{ result is failed }}"
+
+ - name: 'Create a user (password encrypted: {{ encrypted }})'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: '{{ encrypted }}'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - block: &changed # block is only used here in order to be able to define YAML anchor
+ - name: Check that ansible reports it was created
+ assert:
+ that:
+ - "{{ result is changed }}"
+
+ - name: Check that it was created
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+ - name: Check that creating user a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: '{{ encrypted }}'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - block: &not_changed # block is only used here in order to be able to define YAML anchor
+ - name: Check that ansible reports no change
+ assert:
+ that:
+ - "{{ result is not changed }}"
+
+ - name: 'Define an expiration time'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ expires: '2025-01-01'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Redefine the same expiration time'
+ <<: *task_parameters
+ postgresql_user:
+ expires: '2025-01-01'
+ <<: *parameters
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - block:
+
+ - name: 'Using MD5-hashed password: check that password not changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'ENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'yes'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'UNENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'no'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Redefine the same expiration time and password (encrypted)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ encrypted: 'yes'
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ expires: '2025-01-01'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: 'prefix{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: "Using MD5-hashed password: check that password changed when using another md5 hash with 'ENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ ('prefix1' ~ db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'yes'
+
+ - <<: *changed
+
+ - name: "Using MD5-hashed password: check that password changed when using md5 hash with 'UNENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ ('prefix2' ~ db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'no'
+ register: change_pass_unencrypted
+ failed_when:
+ - change_pass_unencrypted is failed
+ # newer versions of psycopg2 no longer support unencrypted passwords; ignore the error
+ - '"UNENCRYPTED PASSWORD is no longer supported" not in change_pass_unencrypted.msg'
+
+ - <<: *changed
+
+ - name: 'Using MD5-hashed password: check that password changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using MD5-hashed password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password changed when using a cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ when: encrypted == 'yes'
+
+ - block:
+
+ - name: 'Using cleartext password: check that password not changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Redefine the same expiration time and password (not encrypted)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ expires: '2025-01-01'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "changed{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using cleartext password: check that password changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using cleartext password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ when: encrypted == 'no'
+
+ - name: Remove user
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
+
+ - <<: *changed
+
+ - name: Check that the user was removed
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+ - name: Check that removing user a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ always:
+ - name: Remove user
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
diff --git a/test/integration/targets/incidental_selinux/aliases b/test/integration/targets/incidental_selinux/aliases
new file mode 100644
index 0000000000..6bda43bced
--- /dev/null
+++ b/test/integration/targets/incidental_selinux/aliases
@@ -0,0 +1,3 @@
+needs/root
+shippable/posix/incidental
+skip/aix
diff --git a/test/integration/targets/incidental_selinux/tasks/main.yml b/test/integration/targets/incidental_selinux/tasks/main.yml
new file mode 100644
index 0000000000..41fdca5220
--- /dev/null
+++ b/test/integration/targets/incidental_selinux/tasks/main.yml
@@ -0,0 +1,36 @@
+# (c) 2017, Sam Doran <sdoran@redhat.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- debug:
+ msg: SELinux is disabled
+ when: ansible_selinux is defined and ansible_selinux == False
+
+- debug:
+ msg: SELinux is {{ ansible_selinux.status }}
+ when: ansible_selinux is defined and ansible_selinux != False
+
+- include: selinux.yml
+ when:
+ - ansible_selinux is defined
+ - ansible_selinux != False
+ - ansible_selinux.status == 'enabled'
+
+- include: selogin.yml
+ when:
+ - ansible_selinux is defined
+ - ansible_selinux != False
+ - ansible_selinux.status == 'enabled'
diff --git a/test/integration/targets/incidental_selinux/tasks/selinux.yml b/test/integration/targets/incidental_selinux/tasks/selinux.yml
new file mode 100644
index 0000000000..7fcba899cf
--- /dev/null
+++ b/test/integration/targets/incidental_selinux/tasks/selinux.yml
@@ -0,0 +1,364 @@
+# (c) 2017, Sam Doran <sdoran@redhat.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# First Test
+# ##############################################################################
+# Test changing the state, which requires a reboot
+
+- name: TEST 1 | Get current SELinux config file contents
+ set_fact:
+ selinux_config_original: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}"
+ before_test_sestatus: "{{ ansible_selinux }}"
+
+- debug:
+ var: "{{ item }}"
+ verbosity: 1
+ with_items:
+ - selinux_config_original
+ - before_test_sestatus
+ - ansible_selinux
+
+- name: TEST 1 | Setup SELinux configuration for tests
+ selinux:
+ state: enforcing
+ policy: targeted
+
+- name: TEST 1 | Disable SELinux
+ selinux:
+ state: disabled
+ policy: targeted
+ register: _disable_test1
+
+- debug:
+ var: _disable_test1
+ verbosity: 1
+
+- name: TEST 1 | Re-gather facts
+ setup:
+
+- name: TEST 1 | Assert that status was changed, reboot_required is True, a warning was displayed, and SELinux is configured properly
+ assert:
+ that:
+ - _disable_test1 is changed
+ - _disable_test1.reboot_required
+ - (_disable_test1.warnings | length ) >= 1
+ - ansible_selinux.config_mode == 'disabled'
+ - ansible_selinux.type == 'targeted'
+
+- debug:
+ var: ansible_selinux
+ verbosity: 1
+
+- name: TEST 1 | Disable SELinux again
+ selinux:
+ state: disabled
+ policy: targeted
+ register: _disable_test2
+
+- debug:
+ var: _disable_test2
+ verbosity: 1
+
+- name: TEST 1 | Assert that no change is reported, a warning was displayed, and reboot_required is True
+ assert:
+ that:
+ - _disable_test2 is not changed
+ - (_disable_test2.warnings | length ) >= 1
+ - _disable_test2.reboot_required
+
+- name: TEST 1 | Get modified config file
+ set_fact:
+ selinux_config_after: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}"
+
+- debug:
+ var: selinux_config_after
+ verbosity: 1
+
+- name: TEST 1 | Ensure SELinux config file is properly formatted
+ assert:
+ that:
+ - selinux_config_original | length == selinux_config_after | length
+ - selinux_config_after[selinux_config_after.index('SELINUX=disabled')] is search("^SELINUX=\w+$")
+ - selinux_config_after[selinux_config_after.index('SELINUXTYPE=targeted')] is search("^SELINUXTYPE=\w+$")
+
+- name: TEST 1 | Reset SELinux configuration for next test
+ selinux:
+ state: enforcing
+ policy: targeted
+
+
+# Second Test
+# ##############################################################################
+# Test changing only the policy, which does not require a reboot
+
+- name: TEST 2 | Make sure the policy is present
+ package:
+ name: selinux-policy-mls
+ state: present
+
+- name: TEST 2 | Set SELinux policy
+ selinux:
+ state: enforcing
+ policy: mls
+ register: _state_test1
+
+- debug:
+ var: _state_test1
+ verbosity: 1
+
+- name: TEST 2 | Re-gather facts
+ setup:
+
+- debug:
+ var: ansible_selinux
+ tags: debug
+
+- name: TEST 2 | Assert that status was changed, reboot_required is False, no warnings were displayed, and SELinux is configured properly
+ assert:
+ that:
+ - _state_test1 is changed
+ - not _state_test1.reboot_required
+ - _state_test1.warnings is not defined
+ - ansible_selinux.config_mode == 'enforcing'
+ - ansible_selinux.type == 'mls'
+
+- name: TEST 2 | Set SELinux policy again
+ selinux:
+ state: enforcing
+ policy: mls
+ register: _state_test2
+
+- debug:
+ var: _state_test2
+ verbosity: 1
+
+- name: TEST 2 | Assert that no change was reported, no warnings were displayed, and reboot_required is False
+ assert:
+ that:
+ - _state_test2 is not changed
+ - _state_test2.warnings is not defined
+ - not _state_test2.reboot_required
+
+- name: TEST 2 | Get modified config file
+ set_fact:
+ selinux_config_after: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}"
+
+- debug:
+ var: selinux_config_after
+ verbosity: 1
+
+- name: TEST 2 | Ensure SELinux config file is properly formatted
+ assert:
+ that:
+ - selinux_config_original | length == selinux_config_after | length
+ - selinux_config_after[selinux_config_after.index('SELINUX=enforcing')] is search("^SELINUX=\w+$")
+ - selinux_config_after[selinux_config_after.index('SELINUXTYPE=mls')] is search("^SELINUXTYPE=\w+$")
+
+- name: TEST 2 | Reset SELinux configuration for next test
+ selinux:
+ state: enforcing
+ policy: targeted
+
+
+# Third Test
+# ##############################################################################
+# Test changing non-existing policy
+
+- name: TEST 3 | Set SELinux policy
+ selinux:
+ state: enforcing
+ policy: non-existing-selinux-policy
+ register: _state_test1
+ ignore_errors: yes
+
+- debug:
+ var: _state_test1
+ verbosity: 1
+
+- name: TEST 3 | Re-gather facts
+ setup:
+
+- debug:
+ var: ansible_selinux
+ tags: debug
+
+- name: TEST 3 | Assert that status was not changed, the task failed, the msg contains proper information and SELinux was not changed
+ assert:
+ that:
+ - _state_test1 is not changed
+ - _state_test1 is failed
+ - _state_test1.msg == 'Policy non-existing-selinux-policy does not exist in /etc/selinux/'
+ - ansible_selinux.config_mode == 'enforcing'
+ - ansible_selinux.type == 'targeted'
+
+
+# Fourth Test
+# ##############################################################################
+# Test if check mode returns correct changed values and
+# doesn't make any changes
+
+
+- name: TEST 4 | Set SELinux to enforcing
+ selinux:
+ state: enforcing
+ policy: targeted
+ register: _check_mode_test1
+
+- debug:
+ var: _check_mode_test1
+ verbosity: 1
+
+- name: TEST 4 | Set SELinux to enforcing in check mode
+ selinux:
+ state: enforcing
+ policy: targeted
+ register: _check_mode_test1
+ check_mode: yes
+
+- name: TEST 4 | Re-gather facts
+ setup:
+
+- debug:
+ var: ansible_selinux
+ verbosity: 1
+ tags: debug
+
+- name: TEST 4 | Assert that check mode is idempotent
+ assert:
+ that:
+ - _check_mode_test1 is success
+ - not _check_mode_test1.reboot_required
+ - ansible_selinux.config_mode == 'enforcing'
+ - ansible_selinux.type == 'targeted'
+
+- name: TEST 4 | Set SELinux to permissive in check mode
+ selinux:
+ state: permissive
+ policy: targeted
+ register: _check_mode_test2
+ check_mode: yes
+
+- name: TEST 4 | Re-gather facts
+ setup:
+
+- debug:
+ var: ansible_selinux
+ verbosity: 1
+ tags: debug
+
+- name: TEST 4 | Assert that check mode doesn't set state permissive and returns changed
+ assert:
+ that:
+ - _check_mode_test2 is changed
+ - not _check_mode_test2.reboot_required
+ - ansible_selinux.config_mode == 'enforcing'
+ - ansible_selinux.type == 'targeted'
+
+- name: TEST 4 | Disable SELinux in check mode
+ selinux:
+ state: disabled
+ register: _check_mode_test3
+ check_mode: yes
+
+- name: TEST 4 | Re-gather facts
+ setup:
+
+- debug:
+ var: ansible_selinux
+ verbosity: 1
+ tags: debug
+
+- name: TEST 4 | Assert that check mode didn't change anything, status is changed, reboot_required is True, a warning was displayed
+ assert:
+ that:
+ - _check_mode_test3 is changed
+ - _check_mode_test3.reboot_required
+ - (_check_mode_test3.warnings | length ) >= 1
+ - ansible_selinux.config_mode == 'enforcing'
+ - ansible_selinux.type == 'targeted'
+
+- name: TEST 4 | Set SELinux to permissive
+ selinux:
+ state: permissive
+ policy: targeted
+ register: _check_mode_test4
+
+- debug:
+ var: _check_mode_test4
+ verbosity: 1
+
+- name: TEST 4 | Disable SELinux in check mode
+ selinux:
+ state: disabled
+ register: _check_mode_test4
+ check_mode: yes
+
+- name: TEST 4 | Re-gather facts
+ setup:
+
+- debug:
+ var: ansible_selinux
+ verbosity: 1
+ tags: debug
+
+- name: TEST 4 | Assert that check mode didn't change anything, status is changed, reboot_required is True, a warning was displayed
+ assert:
+ that:
+ - _check_mode_test4 is changed
+ - _check_mode_test4.reboot_required
+ - (_check_mode_test4.warnings | length ) >= 1
+ - ansible_selinux.config_mode == 'permissive'
+ - ansible_selinux.type == 'targeted'
+
+- name: TEST 4 | Set SELinux to enforcing
+ selinux:
+ state: enforcing
+ policy: targeted
+ register: _check_mode_test5
+
+- debug:
+ var: _check_mode_test5
+ verbosity: 1
+
+- name: TEST 4 | Disable SELinux
+ selinux:
+ state: disabled
+ register: _check_mode_test5
+
+- name: TEST 4 | Disable SELinux in check mode
+ selinux:
+ state: disabled
+ register: _check_mode_test5
+ check_mode: yes
+
+- name: TEST 4 | Re-gather facts
+ setup:
+
+- debug:
+ var: ansible_selinux
+ verbosity: 1
+ tags: debug
+
+- name: TEST 4 | Assert that in check mode status was not changed, reboot_required is True, a warning was displayed, and SELinux is configured properly
+ assert:
+ that:
+ - _check_mode_test5 is success
+ - _check_mode_test5.reboot_required
+ - (_check_mode_test5.warnings | length ) >= 1
+ - ansible_selinux.config_mode == 'disabled'
+ - ansible_selinux.type == 'targeted'
diff --git a/test/integration/targets/incidental_selinux/tasks/selogin.yml b/test/integration/targets/incidental_selinux/tasks/selogin.yml
new file mode 100644
index 0000000000..a2c820ff38
--- /dev/null
+++ b/test/integration/targets/incidental_selinux/tasks/selogin.yml
@@ -0,0 +1,81 @@
+---
+
+- name: create user for testing
+ user:
+ name: seuser
+
+- name: attempt to add mapping without 'seuser'
+ selogin:
+ login: seuser
+ register: selogin_error
+ ignore_errors: yes
+
+- name: verify failure
+ assert:
+ that:
+ - selogin_error is failed
+
+- name: map login to SELinux user
+ selogin:
+ login: seuser
+ seuser: staff_u
+ register: selogin_new_mapping
+ check_mode: "{{ item }}"
+ with_items:
+ - yes
+ - no
+ - yes
+ - no
+
+- name: new mapping - verify functionality and check_mode
+ assert:
+ that:
+ - selogin_new_mapping.results[0] is changed
+ - selogin_new_mapping.results[1] is changed
+ - selogin_new_mapping.results[2] is not changed
+ - selogin_new_mapping.results[3] is not changed
+
+- name: change SELinux user login mapping
+ selogin:
+ login: seuser
+ seuser: user_u
+ register: selogin_mod_mapping
+ check_mode: "{{ item }}"
+ with_items:
+ - yes
+ - no
+ - yes
+ - no
+
+- name: changed mapping - verify functionality and check_mode
+ assert:
+ that:
+ - selogin_mod_mapping.results[0] is changed
+ - selogin_mod_mapping.results[1] is changed
+ - selogin_mod_mapping.results[2] is not changed
+ - selogin_mod_mapping.results[3] is not changed
+
+- name: remove SELinux user mapping
+ selogin:
+ login: seuser
+ state: absent
+ register: selogin_del_mapping
+ check_mode: "{{ item }}"
+ with_items:
+ - yes
+ - no
+ - yes
+ - no
+
+- name: delete mapping - verify functionality and check_mode
+ assert:
+ that:
+ - selogin_del_mapping.results[0] is changed
+ - selogin_del_mapping.results[1] is changed
+ - selogin_del_mapping.results[2] is not changed
+ - selogin_del_mapping.results[3] is not changed
+
+- name: remove test user
+ user:
+ name: seuser
+ state: absent
diff --git a/test/integration/targets/incidental_setup_docker/aliases b/test/integration/targets/incidental_setup_docker/aliases
new file mode 100644
index 0000000000..d466c39c20
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/aliases
@@ -0,0 +1,2 @@
+needs/target/setup_epel
+hidden
diff --git a/test/integration/targets/incidental_setup_docker/defaults/main.yml b/test/integration/targets/incidental_setup_docker/defaults/main.yml
new file mode 100644
index 0000000000..f7be73dc0e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/defaults/main.yml
@@ -0,0 +1,16 @@
+docker_cli_version: '0.0'
+docker_api_version: '0.0'
+docker_py_version: '0.0'
+docker_skip_cleanup: no
+docker_prereq_packages: []
+docker_packages:
+ - docker-ce
+
+docker_pip_extra_packages: []
+docker_pip_packages:
+ - docker
+
+docker_cleanup_packages:
+ - docker
+ - docker-ce
+ - docker-ce-cli
diff --git a/test/integration/targets/incidental_setup_docker/handlers/main.yml b/test/integration/targets/incidental_setup_docker/handlers/main.yml
new file mode 100644
index 0000000000..93d9657cbb
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/handlers/main.yml
@@ -0,0 +1,14 @@
+- name: remove pip packages
+ pip:
+ state: absent
+ name: "{{ docker_pip_packages | union(docker_pip_extra_packages) }}"
+ listen: cleanup docker
+ when: not docker_skip_cleanup | bool
+
+- name: remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ docker_cleanup_packages }}"
+ state: absent
+ listen: cleanup docker
+ when: not docker_skip_cleanup | bool
diff --git a/test/integration/targets/incidental_setup_docker/meta/main.yml b/test/integration/targets/incidental_setup_docker/meta/main.yml
new file mode 100644
index 0000000000..91a63627f6
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_constraints
diff --git a/test/integration/targets/incidental_setup_docker/tasks/Debian.yml b/test/integration/targets/incidental_setup_docker/tasks/Debian.yml
new file mode 100644
index 0000000000..0ea2cb4be2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/Debian.yml
@@ -0,0 +1,43 @@
+- name: Get OS version
+ shell: uname -r
+ register: os_version
+
+- name: Install pre-reqs
+ apt:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ update_cache: yes
+ notify: cleanup docker
+
+- name: Add gpg key
+ shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg >key && apt-key add key
+
+- name: Add Docker repo
+ shell: add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+
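+# While /usr/sbin/policy-rc.d exists and exits 101, invoke-rc.d refuses to
+# start services, so installing docker-ce inside a container won't try to
+# launch the docker daemon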
+- block:
+ - name: Prevent service restart
+ copy:
+ content: exit 101
+ dest: /usr/sbin/policy-rc.d
+ backup: yes
+ mode: 0755
+ register: policy_rc_d
+
+ - name: Install Docker CE
+ apt:
+ name: "{{ docker_packages }}"
+ state: present
+ update_cache: yes
+ always:
+ - name: Restore /usr/sbin/policy-rc.d (if needed)
+ command: mv {{ policy_rc_d.backup_file }} /usr/sbin/policy-rc.d
+ when:
+ - "'backup_file' in policy_rc_d"
+
+ - name: Remove /usr/sbin/policy-rc.d (if needed)
+ file:
+ path: /usr/sbin/policy-rc.d
+ state: absent
+ when:
+ - "'backup_file' not in policy_rc_d"
diff --git a/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml b/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml
new file mode 100644
index 0000000000..9f52e8f144
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml
@@ -0,0 +1,21 @@
+- name: Add repository
+ yum_repository:
+ file: docker-ce
+ name: docker-ce-stable
+ description: Docker CE Stable - $basearch
+ baseurl: https://download.docker.com/linux/fedora/$releasever/$basearch/stable
+ enabled: yes
+ gpgcheck: yes
+ gpgkey: https://download.docker.com/linux/fedora/gpg
+
+- name: Update cache
+ command: dnf makecache
+ args:
+ warn: no
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages }}"
+ state: present
+ enablerepo: docker-ce-stable
+ notify: cleanup docker
diff --git a/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml b/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml
new file mode 100644
index 0000000000..cacc708dc8
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml
@@ -0,0 +1,39 @@
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ yum:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Install epel repo which is missing on rhel-7 and is needed for pigz (needed for docker-ce 18)
+ include_role:
+ name: setup_epel
+
+- name: Enable extras repository for RHEL on AWS
+ command: yum-config-manager --enable rhui-REGION-rhel-server-extras
+ args:
+ warn: no
+
+- name: Add repository
+ command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ args:
+ warn: no
+
+- name: Update cache
+ command: yum -y makecache fast
+ args:
+ warn: no
+
+- name: Install docker
+ yum:
+ name: "{{ docker_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type == 'docker' }}"
diff --git a/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml b/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml
new file mode 100644
index 0000000000..63bf2a42af
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml
@@ -0,0 +1,29 @@
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ dnf:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+ register: result
+ until: result is success
+ retries: 10
+ delay: 2
+
+- name: Set-up repository
+ command: dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ args:
+ warn: no
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type == 'docker' }}"
diff --git a/test/integration/targets/incidental_setup_docker/tasks/Suse.yml b/test/integration/targets/incidental_setup_docker/tasks/Suse.yml
new file mode 100644
index 0000000000..8183194149
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/Suse.yml
@@ -0,0 +1,7 @@
+- name: Install docker 17
+ zypper:
+ name: "{{ docker_packages }}"
+ force: yes
+ disable_gpg_check: yes
+ update_cache: yes
+ notify: cleanup docker
diff --git a/test/integration/targets/incidental_setup_docker/tasks/main.yml b/test/integration/targets/incidental_setup_docker/tasks/main.yml
new file mode 100644
index 0000000000..359a6d4493
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/main.yml
@@ -0,0 +1,113 @@
+- name: Setup Docker
+ when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ block:
+ - name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
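+ # first_found returns the first file that exists, so the most specific
+ # distribution+version match wins over os_family and default.yml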
+ - name: Include distribution specific tasks
+ include_tasks: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ paths:
+ - "{{ role_path }}/tasks"
+
+ - name: Install Python requirements
+ pip:
+ state: present
+ name: "{{ docker_pip_packages | union(docker_pip_extra_packages) }}"
+ extra_args: "-c {{ remote_constraints }}"
+ notify: cleanup docker
+
+ # Detect docker CLI, API and docker-py versions
+ - name: Check Docker CLI version
+ command: "docker version -f {% raw %}'{{.Client.Version}}'{% endraw %}"
+ register: docker_cli_version_stdout
+ ignore_errors: yes
+
+ - name: Check Docker API version
+ command: "{{ ansible_python.executable }} -c 'import docker; print(docker.from_env().version()[\"ApiVersion\"])'"
+ register: docker_api_version_stdout
+ ignore_errors: yes
+
+ - name: Check docker-py API version
+ command: "{{ ansible_python.executable }} -c 'import docker; print(docker.__version__)'"
+ register: docker_py_version_stdout
+ ignore_errors: yes
+
+ - set_fact:
+ docker_cli_version: "{{ docker_cli_version_stdout.stdout | default('0.0') }}"
+ docker_api_version: "{{ docker_api_version_stdout.stdout | default('0.0') }}"
+ docker_py_version: "{{ docker_py_version_stdout.stdout | default('0.0') }}"
+
+ - debug:
+ msg: "Docker CLI version: {{ docker_cli_version }}; Docker API version: {{ docker_api_version }}; docker-py library version: {{ docker_py_version }}"
+
+ - block:
+ # Cleanup docker daemon
+ - name: "Remove all ansible-test-* docker containers"
+ shell: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker rm -f'
+ register: docker_containers
+ retries: 3
+ delay: 3
+ until: docker_containers is success
+
+ - name: "Remove all ansible-test-* docker volumes"
+ shell: 'docker volume ls --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker volume rm -f'
+ register: docker_volumes
+
+ - name: "Remove all ansible-test-* docker networks"
+ shell: 'docker network ls --no-trunc --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker network rm'
+ register: docker_networks
+
+ - name: Cleaned docker resources
+ debug:
+ var: docker_resources
+ vars:
+ docker_resources:
+ containers: "{{ docker_containers.stdout_lines }}"
+ volumes: "{{ docker_volumes.stdout_lines }}"
+ networks: "{{ docker_networks.stdout_lines }}"
+
+ # List all existing docker resources
+ - name: List all docker containers
+ command: docker ps --no-trunc -a
+ register: docker_containers
+
+ - name: List all docker volumes
+ command: docker volume ls
+ register: docker_volumes
+
+ - name: List all docker networks
+ command: docker network ls --no-trunc
+ register: docker_networks
+
+ - name: List all docker images
+ command: docker images --no-trunc -a
+ register: docker_images
+
+ - name: Still existing docker resources
+ debug:
+ var: docker_resources
+ vars:
+ docker_resources:
+ containers: "{{ docker_containers.stdout_lines }}"
+ volumes: "{{ docker_volumes.stdout_lines }}"
+ networks: "{{ docker_networks.stdout_lines }}"
+ images: "{{ docker_images.stdout_lines }}"
+
+ when: docker_cli_version is version('0.0', '>')
diff --git a/test/integration/targets/incidental_setup_docker/vars/Debian.yml b/test/integration/targets/incidental_setup_docker/vars/Debian.yml
new file mode 100644
index 0000000000..e9ffb94159
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Debian.yml
@@ -0,0 +1,5 @@
+docker_prereq_packages:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - software-properties-common
diff --git a/test/integration/targets/incidental_setup_docker/vars/Fedora.yml b/test/integration/targets/incidental_setup_docker/vars/Fedora.yml
new file mode 100644
index 0000000000..4312688103
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Fedora.yml
@@ -0,0 +1,4 @@
+docker_prereq_packages: []
+
+docker_packages:
+ - docker-ce
diff --git a/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml b/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml
new file mode 100644
index 0000000000..7166b1f573
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml
@@ -0,0 +1,8 @@
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+
+docker_pip_extra_packages:
+ - requests==2.6.0
diff --git a/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml b/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml
new file mode 100644
index 0000000000..3e15293354
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml
@@ -0,0 +1,9 @@
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+
+# Docker CE > 3:18.09.1 requires containerd.io >= 1.2.2-3, which is unavailable at this time
+docker_packages:
+ - docker-ce-3:18.09.1
diff --git a/test/integration/targets/incidental_setup_docker/vars/Suse.yml b/test/integration/targets/incidental_setup_docker/vars/Suse.yml
new file mode 100644
index 0000000000..ad0d515e2b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Suse.yml
@@ -0,0 +1,2 @@
+docker_packages:
+ - docker>=17
diff --git a/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml b/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml
new file mode 100644
index 0000000000..36ab54b9d9
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml
@@ -0,0 +1,5 @@
+docker_pip_extra_packages:
+  # Installing requests >= 2.12.0 on Ubuntu 14.04 breaks certificate validation. We pin an older version
+  # to ensure our get_url tests keep working. This is only an issue if pyOpenSSL is also installed.
+  # It is not clear why RHEL 7 needs this specific version.
+  - requests==2.6.0
diff --git a/test/integration/targets/incidental_setup_docker/vars/default.yml b/test/integration/targets/incidental_setup_docker/vars/default.yml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/default.yml
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/README.md b/test/integration/targets/incidental_setup_flatpak_remote/README.md
new file mode 100644
index 0000000000..d7916c14b3
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/README.md
@@ -0,0 +1,138 @@
+# Create a dummy flatpak repository remote
+
+This document describes how to create a local flatpak dummy repo, just like the one contained in the `files/repo.tar.xz` archive.
+
+
+## Create a hello world app
+
+Prerequisites:
+
+ - the `flathub` remote (added below if missing)
+
+Prepare the environment:
+
+```
+flatpak install --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6
+```
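+
+If the `flathub` remote is not configured yet, add it first (assuming the standard flathub repo URL):
+
+```
+flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo
+```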
+
+Create a hello world executable:
+
+```
+echo $'#!/bin/sh\necho hello world' > hello.sh
+```
+
+To create dummy flatpaks, run the following, defining a unique NUM for every flatpak to add:
+
+```
+export NUM=1
+flatpak build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6;
+flatpak build appdir$NUM mkdir /app/bin;
+flatpak build appdir$NUM install --mode=750 hello.sh /app/bin;
+flatpak build-finish --command=hello.sh appdir$NUM
+```
+
+## Create a repo and/or add the app to it
+
+Create the repo and add the app to it in one command:
+
+```
+flatpak build-export repo appdir$NUM stable
+```
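+
+Since a flatpak repo is an ostree repo, the export can be verified by listing the refs it now contains (assuming `ostree` is installed):
+
+```
+ostree refs --repo=repo
+```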
+
+## Create flatpak*-files
+
+Put a flatpakref file under the repo folder (`repo/com.dummy.App1.flatpakref`):
+
+```
+[Flatpak Ref]
+Title=Dummy App$NUM
+Name=com.dummy.App$NUM
+Branch=stable
+Url=file:///tmp/flatpak/repo
+GPGKey={{ base64-encoded public KEY }}
+IsRuntime=false
+RuntimeRepo=https://flathub.org/repo/flathub.flatpakrepo
+```
+
+Add a `.flatpakrepo` file to the `repo` folder (`repo/dummy-repo.flatpakrepo`):
+
+```
+[Flatpak Repo]
+Title=Dummy Repo
+Url=file:///tmp/flatpak/repo
+Comment=Dummy repo for ansible module integration testing
+Description=Dummy repo for ansible module integration testing
+GPGKey={{ base64-encoded public KEY }}
+```
+
+## Sign the repo
+
+Create a new key in a new gpg home folder (on RedHat systems, the executable needs to be invoked as gpg2):
+
+```
+mkdir gpg
+gpg --homedir gpg --quick-gen-key test@dummy.com
+```
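+
+The KEY_ID used below can be looked up in the new keyring (assuming GnuPG 2.x):
+
+```
+gpg --homedir gpg --list-keys --keyid-format long
+```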
+
+Sign the repo and summary file; you need to redo this whenever you update the repository:
+
+```
+flatpak build-sign repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+flatpak build-update-repo repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+```
+
+Export the public key as a file:
+
+```
+gpg --homedir=gpg --export KEY_ID > dummy-repo.gpg
+```
+
+Create the base64-encoded string for the `GPGKey=` property in the flatpak*-files from the gpg file:
+
+```
+base64 dummy-repo.gpg | tr -d '\n'
+```
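+
+To splice the output into both files in one go, something like this works (a hypothetical helper, assuming GNU sed):
+
+```
+KEY=$(base64 dummy-repo.gpg | tr -d '\n')
+sed -i "s|{{ base64-encoded public KEY }}|$KEY|" repo/com.dummy.App1.flatpakref repo/dummy-repo.flatpakrepo
+```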
+
+## How to use the repo
+
+Now you can add the `repo` folder as a local repo:
+
+```
+flatpak --system remote-add --gpg-import=/tmp/flatpak/repo/dummy-repo.gpg dummy-repo /tmp/flatpak/repo
+```
+
+Or, via `.flatpakrepo` file:
+
+```
+flatpak --system remote-add dummy-repo /tmp/flatpak/repo/dummy-repo.flatpakrepo
+```
+
+And install the hello world flatpaks like this:
+
+```
+flatpak --system install dummy-repo com.dummy.App$NUM
+```
+
+Or from flatpakref:
+
+```
+flatpak --system install --from /tmp/flatpak/repo/com.dummy.App$NUM.flatpakref
+```
+
+Run the app:
+
+```
+flatpak run com.dummy.App$NUM
+```
+
+To install an app without any runtime dependencies (the app will be broken, but it is enough to test flatpak installation):
+
+```
+flatpak --system install --no-deps dummy-repo com.dummy.App$NUM
+```
+
+## Sources
+
+* https://blogs.gnome.org/alexl/2017/02/10/maintaining-a-flatpak-repository/
+
+* http://docs.flatpak.org/en/latest/first-build.html
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/aliases b/test/integration/targets/incidental_setup_flatpak_remote/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz b/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz
new file mode 100644
index 0000000000..41a89c46ef
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz
Binary files differ
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml
new file mode 100644
index 0000000000..9380dee96b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml
@@ -0,0 +1,4 @@
+- name: remove temporary flatpak link
+ file:
+ state: absent
+ path: /tmp/flatpak
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml
new file mode 100644
index 0000000000..75ee4583ac
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml
new file mode 100644
index 0000000000..c199d216fa
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml
@@ -0,0 +1,22 @@
+- name: Set up dummy flatpak repository remote
+ block:
+
+ - name: Copy repo into place
+ unarchive:
+ src: repo.tar.xz
+ dest: "{{ remote_tmp_dir }}"
+ owner: root
+ group: root
+ mode: 0644
+
+ - name: Create deterministic link to temp directory
+ file:
+ state: link
+ src: "{{ remote_tmp_dir }}/"
+ path: "/tmp/flatpak"
+ owner: root
+ group: root
+ mode: 0644
+ notify: remove temporary flatpak link
+
+ become: true
diff --git a/test/integration/targets/incidental_setup_mongodb/aliases b/test/integration/targets/incidental_setup_mongodb/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_mongodb/defaults/main.yml b/test/integration/targets/incidental_setup_mongodb/defaults/main.yml
new file mode 100644
index 0000000000..b205013c94
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/defaults/main.yml
@@ -0,0 +1,46 @@
+mongodb_version: "4.0"
+
+apt:
+  keyserver: "keyserver.ubuntu.com"
+  keyserver_id: "9DA31620334BD75D9DCB49F368818C72E52529D4"
+  repo: "deb [ arch=amd64 ] http://repo.mongodb.org/apt/ubuntu {{ ansible_distribution_release }}/mongodb-org/{{ mongodb_version }} multiverse"
+
+mongodb_packages:
+  mongod: mongodb-org-server
+  mongos: mongodb-org-mongos
+  mongo: mongodb-org-shell
+
+yum:
+  name: mongodb-org
+  description: "Official MongoDB {{ mongodb_version }} yum repo"
+  baseurl: https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/{{ mongodb_version }}/x86_64/
+  gpgcheck: 1
+  gpgkey: https://www.mongodb.org/static/pgp/server-{{ mongodb_version }}.asc
+  redhat8url: https://repo.mongodb.org/yum/redhat/7/mongodb-org/{{ mongodb_version }}/x86_64/
+  fedoraurl: https://repo.mongodb.org/yum/amazon/2013.03/mongodb-org/{{ mongodb_version }}/x86_64/
+
+debian_packages_py2:
+ - python-dev
+ - python-setuptools
+ - python-pip
+
+debian_packages_py36:
+ - python3.6-dev
+ - python3-setuptools
+ - python3-pip
+
+redhat_packages_py2:
+ - python-devel
+ - python-setuptools
+ - python-pip
+
+redhat_packages_py3:
+ - python3-devel
+ - python3-setuptools
+ - python3-pip
+
+# Do not install requests[security] via pip. It will cause test failures.
+# See https://github.com/ansible/ansible/pull/66319
+pip_packages:
+ - psutil
+ - pymongo
diff --git a/test/integration/targets/incidental_setup_mongodb/handlers/main.yml b/test/integration/targets/incidental_setup_mongodb/handlers/main.yml
new file mode 100644
index 0000000000..1b73525e9c
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Remove debian_packages_py2
+ apt:
+ name: "{{ debian_packages_py2 }}"
+ state: absent
+
+- name: Remove debian_packages_py36
+ apt:
+ name: "{{ debian_packages_py36 }}"
+ state: absent
+
+- name: Remove redhat_packages_py2
+  yum:
+    name: "{{ redhat_packages_py2 }}"
+    state: absent
+
+- name: Remove redhat_packages_py3
+  yum:
+    name: "{{ redhat_packages_py3 }}"
+    state: absent
+
+- name: remove mongodb pip packages
+ pip:
+ name: "{{ pip_packages }}"
+ state: absent
diff --git a/test/integration/targets/incidental_setup_mongodb/tasks/main.yml b/test/integration/targets/incidental_setup_mongodb/tasks/main.yml
new file mode 100644
index 0000000000..04109c9718
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/tasks/main.yml
@@ -0,0 +1,166 @@
+# (c) 2019, Rhys Campbell <rhys.james.campbell@googlemail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+# https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/
+# Support for Ubuntu 14.04 has been removed from MongoDB 4.0.10+, 3.6.13+, and 3.4.21+.
+# CentOS 6 has Python version issues
+- meta: end_play
+ when: (ansible_distribution == 'Ubuntu' and ansible_distribution_version == '14.04')
+ or (ansible_os_family == "RedHat" and ansible_distribution_major_version == '6')
+ or ansible_os_family == "Suse"
+ or ansible_distribution == 'Fedora'
+ or (ansible_facts['distribution'] == "CentOS" and ansible_facts['distribution_major_version'] == '7')
+
+# Ubuntu
+- name: Import MongoDB public GPG Key
+ apt_key:
+ keyserver: "{{ apt.keyserver }}"
+ id: "{{ apt.keyserver_id }}"
+ when:
+ - ansible_distribution_version in ["16.04", "18.04"]
+ - ansible_distribution == 'Ubuntu'
+
+- name: Add MongoDB repository into sources list
+ apt_repository:
+ repo: "{{ apt.repo }}"
+ state: present
+ update_cache: yes
+ when:
+ - ansible_distribution_version in ["16.04", "18.04"]
+ - ansible_distribution == 'Ubuntu'
+
+# Need to handle various platforms here. The package name will not always be the same.
+- name: Ensure mongod package is installed
+ apt:
+ name: "{{ mongodb_packages.mongod }}"
+ state: present
+ force: yes
+ when:
+ - ansible_distribution == 'Ubuntu'
+
+- name: Ensure mongos package is installed
+ apt:
+ name: "{{ mongodb_packages.mongos }}"
+ state: present
+ force: yes
+ when:
+ - ansible_distribution == 'Ubuntu'
+
+- name: Ensure mongo client is installed
+ apt:
+ name: "{{ mongodb_packages.mongo }}"
+ state: present
+ force: yes
+ when:
+ - ansible_distribution == 'Ubuntu'
+# EOF Ubuntu
+
+# Redhat
+- name: Add MongoDB repo
+ yum_repository:
+ name: "{{ yum.name }}"
+ description: "{{ yum.description }}"
+ baseurl: "{{ yum.baseurl }}"
+ gpgcheck: "{{ yum.gpgcheck }}"
+ gpgkey: "{{ yum.gpgkey }}"
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_version.split('.')[0]|int <= 7
+ - not ansible_distribution == "Fedora"
+
+
+- name: RedHat 8 repo is not yet available, so use the RedHat 7 url
+ yum_repository:
+ name: "{{ yum.name }}"
+ description: "{{ yum.description }}"
+ baseurl: "{{ yum.redhat8url }}"
+ gpgcheck: "{{ yum.gpgcheck }}"
+ gpgkey: "{{ yum.gpgkey }}"
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_version.split('.')[0]|int == 8
+ - not ansible_distribution == "Fedora"
+
+- name: Use a different url for Fedora-based systems
+ yum_repository:
+ name: "{{ yum.name }}"
+ description: "{{ yum.description }}"
+ baseurl: "{{ yum.fedoraurl }}"
+ gpgcheck: "{{ yum.gpgcheck }}"
+ gpgkey: "{{ yum.gpgkey }}"
+ when:
+ - ansible_distribution == "Fedora"
+
+- name: Ensure mongod package is installed
+ yum:
+ name: "{{ mongodb_packages.mongod }}"
+ state: present
+ when: ansible_os_family == "RedHat"
+
+- name: Ensure mongos package is installed
+ yum:
+ name: "{{ mongodb_packages.mongos }}"
+ state: present
+ when: ansible_os_family == "RedHat"
+
+- name: Ensure mongo client is installed
+ yum:
+ name: "{{ mongodb_packages.mongo }}"
+ state: present
+ when: ansible_os_family == "RedHat"
+# EOF Redhat
+
+- name: Install debian_packages
+ apt:
+ name: "{{ debian_packages_py2 }}"
+ when:
+ - ansible_os_family == "Debian"
+ - ansible_distribution_version == "16.04"
+ notify: Remove debian_packages_py2
+
+- name: Install debian_packages
+ apt:
+ name: "{{ debian_packages_py36 }}"
+ when:
+ - ansible_os_family == "Debian"
+ - ansible_distribution_version == "18.04"
+ notify: Remove debian_packages_py36
+
+- name: Install redhat_packages_py2
+  yum:
+    name: "{{ redhat_packages_py2 }}"
+  when:
+    - ansible_os_family == "RedHat"
+    - ansible_distribution_version|float < 8
+  notify: Remove redhat_packages_py2
+
+- name: Install redhat_packages_py3
+ yum:
+ name: "{{ redhat_packages_py3 }}"
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_version|float >= 8
+ notify: Remove redhat_packages_py3
+
+- name: Install pip packages
+ pip:
+ name: "{{ pip_packages }}"
+ state: present
+ notify: remove mongodb pip packages
diff --git a/test/integration/targets/incidental_setup_mysql_db/aliases b/test/integration/targets/incidental_setup_mysql_db/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_mysql_db/defaults/main.yml b/test/integration/targets/incidental_setup_mysql_db/defaults/main.yml
new file mode 100644
index 0000000000..47712dc266
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/defaults/main.yml
@@ -0,0 +1,18 @@
+mysql_service: mysqld
+
+mysql_packages:
+ - mysql-server
+ - MySQL-python
+ - bzip2
+
+mysql_cleanup_packages:
+ - mysql-community-client
+ - mysql-community-common
+ - mysql-community-libs
+ - mysql-community-libs-compat
+ - mysql-community-server
+ - mysql80-community-release
+
+mysql_data_dirs:
+ - /var/lib/mysql
+ - /usr/mysql
diff --git a/test/integration/targets/incidental_setup_mysql_db/handlers/main.yml b/test/integration/targets/incidental_setup_mysql_db/handlers/main.yml
new file mode 100644
index 0000000000..abcd488ef2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/handlers/main.yml
@@ -0,0 +1,25 @@
+- name: stop mysql service
+ service:
+ name: "{{ mysql_service }}"
+ state: stopped
+ listen: cleanup mysql
+
+- name: remove mysql packages
+ action: '{{ ansible_pkg_mgr }}'
+ args:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ mysql_packages | union(mysql_cleanup_packages) }}"
+ listen: cleanup mysql
+
+- name: remove mysql data
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop: "{{ mysql_data_dirs }}"
+ listen: cleanup mysql
+
+- name: remove pip packages
+ pip:
+ name: mysql-python
+ state: absent
diff --git a/test/integration/targets/incidental_setup_mysql_db/tasks/main.yml b/test/integration/targets/incidental_setup_mysql_db/tasks/main.yml
new file mode 100644
index 0000000000..990cdb6e63
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/tasks/main.yml
@@ -0,0 +1,105 @@
+# setup code for the mysql_db module
+# (c) 2014, Wayne Rosario <wrosario@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+- name: python 2
+ set_fact:
+ python_suffix: ""
+ when: ansible_python_version is version('3', '<')
+
+- name: python 3
+ set_fact:
+ python_suffix: "-py3"
+ when: ansible_python_version is version('3', '>=')
+
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - '{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml'
+ - '{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml'
+ - '{{ ansible_facts.distribution }}{{ python_suffix }}.yml'
+ - '{{ ansible_facts.os_family }}{{ python_suffix }}.yml'
+ - 'default{{ python_suffix }}.yml'
+ paths: "{{ role_path }}/vars"
+
+- name: install mysqldb_test rpm dependencies
+ yum:
+ name: "{{ mysql_packages }}"
+ state: latest
+ when: ansible_pkg_mgr == 'yum'
+ notify: cleanup mysql
+
+- name: install mysqldb_test rpm dependencies
+ dnf:
+ name: '{{ mysql_packages }}'
+ state: latest
+ install_weak_deps: False # mariadb-server has a weak dep on python2, which breaks Python 3 test environments
+ when: ansible_pkg_mgr == 'dnf'
+ notify: cleanup mysql
+
+- name: install mysqldb_test debian dependencies
+ apt:
+ name: "{{ mysql_packages }}"
+ state: latest
+ when: ansible_pkg_mgr == 'apt'
+ notify: cleanup mysql
+
+- name: install mysqldb_test FreeBSD dependencies
+ package:
+ name: "{{ mysql_packages }}"
+ state: present
+ when: ansible_os_family == "FreeBSD"
+ notify: cleanup mysql
+
+- name: install mysql-python package via pip (FreeBSD)
+ pip:
+ name: mysql-python
+ state: present
+ when: ansible_os_family == "FreeBSD"
+ notify:
+ - cleanup mysql
+ - remove pip packages
+
+- name: enable mysql-server service (FreeBSD)
+ lineinfile:
+ path: /etc/rc.conf
+ line: 'mysql_server_enable="YES"'
+ when: ansible_os_family == "FreeBSD"
+
+- name: apply work-around for OverlayFS issue
+ # https://github.com/docker/for-linux/issues/72#issuecomment-319904698
+ command: find {{ mysql_data_dirs[0] }} -type f -exec touch {} ;
+ # find will fail if mysql has never been started, as the directory won't exist
+ ignore_errors: yes
+
+- name: restart mysql_db service
+ service:
+ name: "{{ mysql_service }}"
+ state: restarted
+
+- name: Detect socket path
+ shell: 'echo "show variables like ''socket''\G" | mysql | grep ''Value: '' | sed ''s/[ ]\+Value: //'''
+ register: _socket_path
+
+- name: Set socket path
+ set_fact:
+ mysql_socket: '{{ _socket_path["stdout"] }}'
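+
+# Optional sanity check (illustrative): print the detected socket path so
+# connection failures in later tests are easier to debug.
+- name: Show detected socket path
+  debug:
+    msg: "MySQL socket is at {{ mysql_socket }}"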
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/Debian.yml b/test/integration/targets/incidental_setup_mysql_db/vars/Debian.yml
new file mode 100644
index 0000000000..52062c703d
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/Debian.yml
@@ -0,0 +1,16 @@
+mysql_service: 'mysql'
+
+mysql_packages:
+ - mysql-server
+ - python-mysqldb
+ - bzip2
+
+mysql_cleanup_packages:
+ - mysql-client*
+ - mysql-server*
+ - mysql-common
+ - mysql-sandbox
+
+mysql_data_dirs:
+ - /var/lib/mysql
+ - /usr/share/mysql
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/Fedora-py3.yml b/test/integration/targets/incidental_setup_mysql_db/vars/Fedora-py3.yml
new file mode 100644
index 0000000000..fa7d06e52a
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/Fedora-py3.yml
@@ -0,0 +1,6 @@
+mysql_service: 'mariadb'
+
+mysql_packages:
+ - mariadb-server
+ - python3-PyMySQL
+ - bzip2
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/Fedora.yml b/test/integration/targets/incidental_setup_mysql_db/vars/Fedora.yml
new file mode 100644
index 0000000000..718326ae08
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/Fedora.yml
@@ -0,0 +1,6 @@
+mysql_service: 'mariadb'
+
+mysql_packages:
+ - mariadb-server
+ - python-PyMySQL
+ - bzip2
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/FreeBSD.yml b/test/integration/targets/incidental_setup_mysql_db/vars/FreeBSD.yml
new file mode 100644
index 0000000000..af45ebfd40
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/FreeBSD.yml
@@ -0,0 +1,5 @@
+mysql_service: 'mysql-server'
+
+mysql_packages:
+ - mariadb101-server
+ - py-pymysql
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/RedHat-7.yml b/test/integration/targets/incidental_setup_mysql_db/vars/RedHat-7.yml
new file mode 100644
index 0000000000..f8b29fd7a1
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/RedHat-7.yml
@@ -0,0 +1,6 @@
+mysql_service: 'mariadb'
+
+mysql_packages:
+ - mariadb-server
+ - MySQL-python
+ - bzip2
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/RedHat-8.yml b/test/integration/targets/incidental_setup_mysql_db/vars/RedHat-8.yml
new file mode 100644
index 0000000000..fa7d06e52a
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/RedHat-8.yml
@@ -0,0 +1,6 @@
+mysql_service: 'mariadb'
+
+mysql_packages:
+ - mariadb-server
+ - python3-PyMySQL
+ - bzip2
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/RedHat.yml b/test/integration/targets/incidental_setup_mysql_db/vars/RedHat.yml
new file mode 100644
index 0000000000..742c35225b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/RedHat.yml
@@ -0,0 +1,6 @@
+mysql_service: 'mysqld'
+
+mysql_packages:
+ - mysql-server
+ - MySQL-python
+ - bzip2
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/Suse-py3.yml b/test/integration/targets/incidental_setup_mysql_db/vars/Suse-py3.yml
new file mode 100644
index 0000000000..adf2754d75
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/Suse-py3.yml
@@ -0,0 +1,6 @@
+mysql_service: 'mysql'
+
+mysql_packages:
+ - mariadb
+ - python3-PyMySQL
+ - bzip2
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/Suse.yml b/test/integration/targets/incidental_setup_mysql_db/vars/Suse.yml
new file mode 100644
index 0000000000..a48a2e1330
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/Suse.yml
@@ -0,0 +1,6 @@
+mysql_service: 'mysql'
+
+mysql_packages:
+ - mariadb
+ - python-PyMySQL
+ - bzip2
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/Ubuntu-py3.yml b/test/integration/targets/incidental_setup_mysql_db/vars/Ubuntu-py3.yml
new file mode 100644
index 0000000000..7728244252
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/Ubuntu-py3.yml
@@ -0,0 +1,16 @@
+mysql_service: 'mysql'
+
+mysql_packages:
+ - mysql-server
+ - python3-pymysql
+ - bzip2
+
+mysql_cleanup_packages:
+ - mysql-client*
+ - mysql-server*
+ - mysql-common
+ - mysql-sandbox
+
+mysql_data_dirs:
+ - /var/lib/mysql
+ - /usr/share/mysql
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/default-py3.yml b/test/integration/targets/incidental_setup_mysql_db/vars/default-py3.yml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/default-py3.yml
diff --git a/test/integration/targets/incidental_setup_mysql_db/vars/default.yml b/test/integration/targets/incidental_setup_mysql_db/vars/default.yml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mysql_db/vars/default.yml
diff --git a/test/integration/targets/incidental_setup_openssl/aliases b/test/integration/targets/incidental_setup_openssl/aliases
new file mode 100644
index 0000000000..e5830e282b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_openssl/aliases
@@ -0,0 +1,2 @@
+hidden
+
diff --git a/test/integration/targets/incidental_setup_openssl/tasks/main.yml b/test/integration/targets/incidental_setup_openssl/tasks/main.yml
new file mode 100644
index 0000000000..5a634458e2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_openssl/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+- name: Include OS-specific variables
+ include_vars: '{{ ansible_os_family }}.yml'
+ when: not ansible_os_family == "Darwin"
+
+- name: Install OpenSSL
+ become: True
+ package:
+ name: '{{ openssl_package_name }}'
+ when: not ansible_os_family == 'Darwin'
+
+- name: Install pyOpenSSL (Python 3)
+ become: True
+ package:
+ name: '{{ pyopenssl_package_name_python3 }}'
+ when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '>=')
+
+- name: Install pyOpenSSL (Python 2)
+ become: True
+ package:
+ name: '{{ pyopenssl_package_name }}'
+ when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '<')
+
+- name: Install pyOpenSSL (Darwin)
+ become: True
+ pip:
+ name: pyOpenSSL
+ when: ansible_os_family == 'Darwin'
+
+- name: register pyOpenSSL version
+ command: "{{ ansible_python.executable }} -c 'import OpenSSL; print(OpenSSL.__version__)'"
+ register: pyopenssl_version
+
+- name: register openssl version
+ shell: "openssl version | cut -d' ' -f2"
+ register: openssl_version
+
+- name: register cryptography version
+ command: "{{ ansible_python.executable }} -c 'import cryptography; print(cryptography.__version__)'"
+ register: cryptography_version
diff --git a/test/integration/targets/incidental_setup_openssl/vars/Debian.yml b/test/integration/targets/incidental_setup_openssl/vars/Debian.yml
new file mode 100644
index 0000000000..755c7a083c
--- /dev/null
+++ b/test/integration/targets/incidental_setup_openssl/vars/Debian.yml
@@ -0,0 +1,3 @@
+pyopenssl_package_name: python-openssl
+pyopenssl_package_name_python3: python3-openssl
+openssl_package_name: openssl
diff --git a/test/integration/targets/incidental_setup_openssl/vars/FreeBSD.yml b/test/integration/targets/incidental_setup_openssl/vars/FreeBSD.yml
new file mode 100644
index 0000000000..608689158a
--- /dev/null
+++ b/test/integration/targets/incidental_setup_openssl/vars/FreeBSD.yml
@@ -0,0 +1,3 @@
+pyopenssl_package_name: py27-openssl
+pyopenssl_package_name_python3: py36-openssl
+openssl_package_name: openssl
diff --git a/test/integration/targets/incidental_setup_openssl/vars/RedHat.yml b/test/integration/targets/incidental_setup_openssl/vars/RedHat.yml
new file mode 100644
index 0000000000..2959932cd7
--- /dev/null
+++ b/test/integration/targets/incidental_setup_openssl/vars/RedHat.yml
@@ -0,0 +1,3 @@
+pyopenssl_package_name: pyOpenSSL
+pyopenssl_package_name_python3: python3-pyOpenSSL
+openssl_package_name: openssl
diff --git a/test/integration/targets/incidental_setup_openssl/vars/Suse.yml b/test/integration/targets/incidental_setup_openssl/vars/Suse.yml
new file mode 100644
index 0000000000..2d5200f341
--- /dev/null
+++ b/test/integration/targets/incidental_setup_openssl/vars/Suse.yml
@@ -0,0 +1,3 @@
+pyopenssl_package_name: python-pyOpenSSL
+pyopenssl_package_name_python3: python3-pyOpenSSL
+openssl_package_name: openssl
diff --git a/test/integration/targets/incidental_setup_postgresql_db/aliases b/test/integration/targets/incidental_setup_postgresql_db/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml b/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml
new file mode 100644
index 0000000000..aea0244280
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml
@@ -0,0 +1,17 @@
+postgresql_service: postgresql
+
+postgresql_packages:
+ - postgresql-server
+ - python-psycopg2
+
+pg_user: postgres
+pg_group: root
+
+locale_latin_suffix:
+locale_utf8_suffix:
+
+# defaults for test SSL
+ssl_db: 'ssl_db'
+ssl_user: 'ssl_user'
+ssl_pass: 'ssl_pass'
+ssl_rootcert: '~{{ pg_user }}/root.crt'
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql
new file mode 100644
index 0000000000..53c79666b4
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''1.0'')::text';
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql
new file mode 100644
index 0000000000..227ba1b4c4
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''2.0'')::text';
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql
new file mode 100644
index 0000000000..7d6a60e543
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3.0'')::text';
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control b/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control
new file mode 100644
index 0000000000..4f8553c227
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control
@@ -0,0 +1,3 @@
+comment = 'dummy extension used to test postgresql_ext Ansible module'
+default_version = '3.0'
+relocatable = true
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf b/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf
new file mode 100644
index 0000000000..58de3607f9
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf
@@ -0,0 +1,10 @@
+# !!! This file managed by Ansible. Any local changes may be overwritten. !!!
+
+# Database administrative login by UNIX sockets
+# note: you may wish to restrict this further later
+local all {{ pg_user }} trust
+
+# TYPE DATABASE USER CIDR-ADDRESS METHOD
+local all all md5
+host all all 127.0.0.1/32 md5
+host all all ::1/128 md5
diff --git a/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml b/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml
new file mode 100644
index 0000000000..651f6b3946
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml
@@ -0,0 +1,215 @@
+- name: python 2
+ set_fact:
+ python_suffix: ""
+ when: ansible_python_version is version('3', '<')
+
+- name: python 3
+ set_fact:
+ python_suffix: "-py3"
+ when: ansible_python_version is version('3', '>=')
+
+- name: Include distribution and Python version specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - 'default{{ python_suffix }}.yml'
+ paths:
+ - "{{ role_path }}/vars"
+
+- name: make sure the dbus service is started under systemd
+ systemd:
+ name: dbus
+ state: started
+ when: ansible_service_mgr == 'systemd' and ansible_distribution == 'Fedora'
+
+# Make sure we start fresh
+- name: stop postgresql service
+ service: name={{ postgresql_service }} state=stopped
+ ignore_errors: True
+
+- name: remove old db (RedHat or Suse)
+ file:
+ path: "{{ pg_dir }}"
+ state: absent
+ ignore_errors: True
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Suse"
+
+- name: remove old db (FreeBSD)
+ file:
+ path: "{{ pg_dir }}"
+ state: absent
+ ignore_errors: True
+ when: ansible_os_family == "FreeBSD"
+
+# Theoretically, pg_dropcluster should work, but it doesn't, so remove the files manually
+- name: remove old db config and files (debian)
+ file:
+ path: '{{ loop_item }}'
+ state: absent
+ ignore_errors: True
+ when: ansible_os_family == "Debian"
+ loop:
+ - /etc/postgresql
+ - /var/lib/postgresql
+ loop_control:
+ loop_var: loop_item
+
+- name: install dependencies for postgresql test
+ package:
+ name: "{{ postgresql_package_item }}"
+ state: present
+ with_items: "{{ postgresql_packages }}"
+ loop_control:
+ loop_var: postgresql_package_item
+
+- name: initialize postgres (FreeBSD)
+ command: /usr/local/etc/rc.d/postgresql oneinitdb
+ when: ansible_os_family == "FreeBSD"
+
+- name: Initialize postgres (RedHat systemd)
+ command: postgresql-setup initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr == "systemd"
+
+- name: Initialize postgres (RedHat sysv)
+ command: /sbin/service postgresql initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd"
+
+- name: Initialize postgres (Debian)
+ shell: '. /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main'
+ args:
+ creates: "/etc/postgresql/{{ pg_ver }}/"
+ when: ansible_os_family == 'Debian'
+
+- name: Initialize postgres (Suse)
+ service: name=postgresql state=restarted
+ when: ansible_os_family == 'Suse'
+
+- name: Copy pg_hba into place
+ template:
+ src: files/pg_hba.conf
+ dest: "{{ pg_hba_location }}"
+ owner: "{{ pg_user }}"
+ group: "{{ pg_group }}"
+ mode: "0644"
+
+- name: Generate locales (Debian)
+ locale_gen:
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - pt_BR
+ - es_ES
+ when: ansible_os_family == 'Debian'
+
+# Suse: locales are installed by default (glibc-locale package).
+# Fedora 23: locales are installed by default (glibc-common package)
+# CentOS: all locales are installed by default (glibc-common package) but some
+# RPM macros could prevent their installation (for example when using anaconda
+# instLangs parameter).
+
+- block:
+ - name: Install langpacks (RHEL8)
+ yum:
+ name:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ - glibc-all-langpacks
+ state: present
+ when: ansible_distribution_major_version is version('8', '>=')
+
+ - name: Check if locales need to be generated (RedHat)
+ shell: "localedef --list-archive | grep -a -q '^{{ locale }}$'"
+ register: locale_present
+ ignore_errors: True
+ with_items:
+ - es_ES
+ - pt_BR
+ loop_control:
+ loop_var: locale
+
+ - name: Reinstall internationalization files
+ shell: 'yum -y reinstall glibc-common || yum -y install glibc-common'
+ args:
+ warn: no
+ when: locale_present is failed
+
+ - name: Generate locale (RedHat)
+ command: 'localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }}'
+ when: item is failed
+ with_items: '{{ locale_present.results }}'
+ when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora'
+
+- name: Install glibc langpacks (Fedora >= 24)
+ package:
+ name: '{{ item }}'
+ state: 'latest'
+ with_items:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=')
+
+- name: enable postgresql service (FreeBSD)
+ lineinfile:
+ path: /etc/rc.conf
+ line: 'postgresql_enable="YES"'
+ when: ansible_os_family == "FreeBSD"
+
+- name: start postgresql service
+ # work-around for issue on FreeBSD where service won't restart if currently stopped
+ service: name={{ postgresql_service }} state=started
+
+- name: restart postgresql service
+ service: name={{ postgresql_service }} state=restarted
+
+########################
+# Setup dummy extension:
+- name: copy control file for dummy ext
+ copy:
+ src: dummy.control
+ dest: "/usr/share/postgresql/{{ pg_ver }}/extension/dummy.control"
+ mode: 0444
+ when: ansible_os_family == 'Debian'
+
+- name: copy version files for dummy ext
+ copy:
+ src: "{{ item }}"
+ dest: "/usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}"
+ mode: 0444
+ with_items:
+ - dummy--1.0.sql
+ - dummy--2.0.sql
+ - dummy--3.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: add update paths
+ file:
+ path: "/usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}"
+ mode: 0444
+ state: touch
+ with_items:
+ - dummy--1.0--2.0.sql
+ - dummy--2.0--3.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: Get PostgreSQL version
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: "echo 'SHOW SERVER_VERSION' | psql --tuples-only --no-align --dbname postgres"
+ register: postgres_version_resp
+
+- name: Print PostgreSQL server version
+ debug:
+ msg: "{{ postgres_version_resp.stdout }}"
+
+# SSL configuration.
+# Restricted to the Debian family because there are errors on other distributions
+# that are not related to PostgreSQL or psycopg2 SSL support.
+# The key point of the tests is to make sure that SSL options work in general.
+- import_tasks: ssl.yml
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml b/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml
new file mode 100644
index 0000000000..bc45ec6f4b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml
@@ -0,0 +1,81 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# The aim of this test is to make sure that SSL options work in general
+# and to prepare the environment for testing these options in
+# the following PostgreSQL modules (ssl_db, ssl_user, certs).
+# Configured following https://www.postgresql.org/docs/current/ssl-tcp.html
+
+####################
+# Prepare for tests:
+
+- name: postgresql SSL - create database
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: "{{ ssl_db }}"
+
+- name: postgresql SSL - create role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ ssl_user }}"
+ role_attr_flags: SUPERUSER
+ password: "{{ ssl_pass }}"
+
+- name: postgresql SSL - install openssl
+ become: yes
+ package: name=openssl state=present
+
+- name: postgresql SSL - create certs 1
+ become_user: root
+ become: yes
+ shell: 'openssl req -new -nodes -text -out ~{{ pg_user }}/root.csr \
+ -keyout ~{{ pg_user }}/root.key -subj "/CN=localhost.local"'
+
+- name: postgresql SSL - create certs 2
+ become_user: root
+ become: yes
+ shell: 'openssl x509 -req -in ~{{ pg_user }}/root.csr -text -days 3650 \
+ -extensions v3_ca -signkey ~{{ pg_user }}/root.key -out ~{{ pg_user }}/root.crt'
+
+- name: postgresql SSL - create certs 3
+ become_user: root
+ become: yes
+ shell: 'openssl req -new -nodes -text -out ~{{ pg_user }}/server.csr \
+ -keyout ~{{ pg_user }}/server.key -subj "/CN=localhost.local"'
+
+- name: postgresql SSL - create certs 4
+ become_user: root
+ become: yes
+ shell: 'openssl x509 -req -in ~{{ pg_user }}/server.csr -text -days 365 \
+ -CA ~{{ pg_user }}/root.crt -CAkey ~{{ pg_user }}/root.key -CAcreateserial -out server.crt'
+
+- name: postgresql SSL - set right permissions to files
+ become_user: root
+ become: yes
+ file:
+ path: '{{ item }}'
+ mode: 0600
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ with_items:
+ - '~{{ pg_user }}/root.key'
+ - '~{{ pg_user }}/server.key'
+ - '~{{ pg_user }}/root.crt'
+ - '~{{ pg_user }}/server.csr'
+
+- name: postgresql SSL - enable SSL
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_set:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: ssl
+ value: on
+
+- name: postgresql SSL - reload PostgreSQL to enable ssl on
+ become: yes
+ service:
+ name: "{{ postgresql_service }}"
+ state: reloaded
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml
new file mode 100644
index 0000000000..c5c6795eac
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.4/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.4/main"
+pg_ver: 9.4
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml
new file mode 100644
index 0000000000..2f6b0d9846
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py36-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml
new file mode 100644
index 0000000000..efb0603b5e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py27-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
new file mode 100644
index 0000000000..2f6b0d9846
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py36-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml
new file mode 100644
index 0000000000..1fe6678262
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql96-server
+ - py27-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.6
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
new file mode 100644
index 0000000000..cd7c83a4c1
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql11-server
+ - py36-psycopg2
+
+pg_dir: /var/db/postgres/data11
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 11
+pg_user: postgres
+pg_group: postgres
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml
new file mode 100644
index 0000000000..0b1ab5b26e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql11-server
+ - py27-psycopg2
+
+pg_dir: /var/db/postgres/data11
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 11
+pg_user: postgres
+pg_group: postgres
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml
new file mode 100644
index 0000000000..ee08372226
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+ - "bzip2"
+ - "xz"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml
new file mode 100644
index 0000000000..20c4b1f5b7
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml
@@ -0,0 +1,7 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+ - "bzip2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml
new file mode 100644
index 0000000000..4b6e744b44
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.1/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.1/main"
+pg_ver: 9.1
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml
new file mode 100644
index 0000000000..ffcc8dd496
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.3/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.3/main"
+pg_ver: 9.3
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml
new file mode 100644
index 0000000000..b088c3105e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.5/main"
+pg_ver: 9.5
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml
new file mode 100644
index 0000000000..897efd2c76
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.5/main"
+pg_ver: 9.5
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml
new file mode 100644
index 0000000000..10453bdf90
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/10/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/10/main"
+pg_ver: 10
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml
new file mode 100644
index 0000000000..19152a6435
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml
@@ -0,0 +1,6 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml
new file mode 100644
index 0000000000..ab36dd9f1d
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml
@@ -0,0 +1,6 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/test/integration/targets/incidental_setup_rabbitmq/aliases b/test/integration/targets/incidental_setup_rabbitmq/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf b/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf
new file mode 100644
index 0000000000..1e602175bd
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf
@@ -0,0 +1,8 @@
+listeners.ssl.default = 5671
+
+ssl_options.cacertfile = /tls/ca_certificate.pem
+ssl_options.certfile = /tls/server_certificate.pem
+ssl_options.keyfile = /tls/server_key.pem
+ssl_options.password = bunnies
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = false
diff --git a/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml b/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml
new file mode 100644
index 0000000000..7a6c3e01e2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - incidental_setup_tls
diff --git a/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml b/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml
new file mode 100644
index 0000000000..4f35f16f62
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: ubuntu.yml
+ when: ansible_distribution == 'Ubuntu'
diff --git a/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml b/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml
new file mode 100644
index 0000000000..6d7a3ef0d9
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml
@@ -0,0 +1,63 @@
+---
+# https://www.rabbitmq.com/install-debian.html#apt-pinning
+- name: Pin erlang version that rabbitmq supports
+ copy:
+ dest: /etc/apt/preferences.d/erlang
+ content: |
+ Package: erlang*
+ Pin: version 1:20.3.8.18-1
+ Pin-Priority: 1000
+
+ Package: esl-erlang
+ Pin: version 1:20.3.6
+ Pin-Priority: 1000
+
+- name: Install https transport for apt
+ apt:
+ name: apt-transport-https
+ state: latest
+ force: yes
+
+- name: Add RabbitMQ release signing key
+ apt_key:
+ url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_rabbitmq/rabbitmq-release-signing-key.asc
+ state: present
+
+- name: Add RabbitMQ Erlang repository
+ apt_repository:
+ repo: "deb https://dl.bintray.com/rabbitmq-erlang/debian {{ ansible_distribution_release }} erlang-20.x"
+ filename: 'rabbitmq-erlang'
+ state: present
+ update_cache: yes
+
+# Required by the rabbitmq modules that use the management API
+- name: Install requests
+ pip:
+ name: requests
+
+- name: Install RabbitMQ Server
+ apt:
+ deb: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_rabbitmq/rabbitmq-server_3.7.14-1_all.deb
+
+- name: Install RabbitMQ TLS dependencies
+ apt:
+ name: "{{ item }}"
+ state: latest
+ loop:
+ - erlang-asn1
+ - erlang-crypto
+ - erlang-public-key
+ - erlang-ssl
+
+- name: Ensure TLS config
+ copy:
+ src: rabbitmq.conf
+ dest: /etc/rabbitmq/rabbitmq.conf
+
+- name: Start RabbitMQ service
+ service:
+ name: rabbitmq-server
+ state: started
+
+- name: Enable management
+ command: rabbitmq-plugins enable --online rabbitmq_management
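+
+# Illustrative check (assumes the management plugin listens on its default port 15672):
+- name: Wait for the management API to come up
+  wait_for:
+    port: 15672
+    timeout: 60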
diff --git a/test/integration/targets/incidental_setup_tls/aliases b/test/integration/targets/incidental_setup_tls/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem b/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem
new file mode 100644
index 0000000000..a438d9266e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDAjCCAeqgAwIBAgIJANguFROhaWocMA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV
+BAMMF1RMU0dlblNlbGZTaWduZWR0Um9vdENBMQ0wCwYDVQQHDAQkJCQkMB4XDTE5
+MDExMTA4MzMxNVoXDTI5MDEwODA4MzMxNVowMTEgMB4GA1UEAwwXVExTR2VuU2Vs
+ZlNpZ25lZHRSb290Q0ExDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQDqVt84czSxWnWW4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp
+7PrBbYF05FOgSdJLvL6grlRSQK2VPsXdLfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4l
+JVpSDsBV2orR4pOIf1s1+iSwvcRQkX46SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy
+0K2MbRs7oG2rdKks8zisfT0ymKnrFTdVeUjIrg0sStaMnf9VVkcEeYkfNY0vWqdn
+CV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET+q+gOvjsEqzn7DvlPkmk86hIIWXKi3aM
+A9swknL3rnagJL6GioWRpYUwKdRKmZxdyr4I2JTTAgMBAAGjHTAbMAwGA1UdEwQF
+MAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQACTpPBf5WSwZ7r
+hrbPUN3qVh70HI0ZNK2jlK6b5fpSdw3JI/GQl0Kw3eGICLzwTByWvhD62U7IigL5
+0UWxWuEod310Y/qo/7OxRVPp5PH/0oNGoKHhEzas2ii0heQYGsHQUKGzYNNyVfjy
+nqBFz5AcKf067LcXivYqod6JDQHqFq/5/hWlIsHHrZIeijqqtthPq39GlGAYO+AB
+U66nzlH7YQgmfYfy6l7O4LsjXf/bz9rWvueO3NqCsmXV+FacDkOkwWA5Kf6rcgNL
+3G+2HAVTRIXDnO4ShnK6aYMW+UklpYRlVYBBUOdwoNIp5gI+BlSc1IuF6PdLVt3q
+VdjN1MjY
+-----END CERTIFICATE-----
diff --git a/test/integration/targets/incidental_setup_tls/files/ca_key.pem b/test/integration/targets/incidental_setup_tls/files/ca_key.pem
new file mode 100644
index 0000000000..0a950eda06
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/ca_key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDqVt84czSxWnWW
+4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp7PrBbYF05FOgSdJLvL6grlRSQK2VPsXd
+LfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4lJVpSDsBV2orR4pOIf1s1+iSwvcRQkX46
+SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy0K2MbRs7oG2rdKks8zisfT0ymKnrFTdV
+eUjIrg0sStaMnf9VVkcEeYkfNY0vWqdnCV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET
++q+gOvjsEqzn7DvlPkmk86hIIWXKi3aMA9swknL3rnagJL6GioWRpYUwKdRKmZxd
+yr4I2JTTAgMBAAECggEBALpg9ZDUMCiOpc+mbNO/ZkP90M7u38Q0M+7HY8XHOPkt
+l+XUkWueSMRLhSeLDzMlnwf1HyN8RZLaJkzP6XAL1VXEwuXAiIskaZ4Cg07Arp/W
+8cHhf4CcMuUVuCtOZcC+ajD4Do5zn9vkm9yH0ap0o0LdoWa/a8WfU+luy0EHBsSW
+6qqI+nqNFmISluVbfWt7t3zp273+8sir6YeHQu9G91/jzggv8rHmu4EHhi3cnU0K
+vY6OPCGBL7nrg9Rv1LSFpH95TvlIM6/Cm0AjgW7m6XwWUTaI9p+GvKzrYUSLd9L/
+QxlmAwiu/sBTXLrsWyr8XEtj+lVGxQ6eFbf6E+lUm8ECgYEA+8Wgmhf3VsC3gvJz
+w2jApEoOioD5iGOWGClGVURkfaBhFELr4XCTVMdBuCtxT7LYTMHTAlBqIbdWDjB4
+m/E417hLGogSDy7j0R0Mx75OOGEitxYUhe0VGDNoytgCNd2UnTMt42lp+9vAHZag
+INhVDOnxRNdtNTf1yYkWUMEbh1sCgYEA7kZNJXPVYJtR78+km/Gcv64Umci7KUV+
+hYc7chR5xv3cXvXg5eojKa4G7CyMQTX7VnRa6CiQKdN73AbIAhS4Oy5UlCOKtmb8
+xnBiOAYwSpOfIeZhjq0RvEeZX0t6u7XsErBZ03rEPKXF2nNDo1x8byrlKPtlUzwJ
+gb5yjmK/mekCgYEA1TWQAs5m4+2Bun+tbv7nnHkmhT4hktGays0xRYYMf6Jwc6MU
+dC5MZg/zZI5Nf8uZhq7hDWWh6vmCA7QifxSxKWVlHIu8l2UDAhRSvVg4j2Aa8Obe
+7GdQZNUsWhLBFHKXpuQvaRTc7q8yqxvicM4igDQg4EZ6sgW4vDm+TxapRF8CgYAz
+n6mhPqpxRtWGxo8cdkmGwfmWpAXg2DykQ3teqQ8FTQUM0erLBWJe6mR3kONGUaLF
+xWnYuMkbNsW0EwgMY17S+6O5gMXR5RhJChpNlxGpZrhoiNiEJ/0atMyG9/x8ZNrj
+5a9ggU248hWe0bBK2YPgNgP2UBlQ4kYRBSkerkhi2QKBgF+tlpyqcU+0iY82qRS2
+wMf7oI2pWR8nX9LPAY/nnvwWvqwcAFJPMlSMTu8Ext6h7l9yu+7JGL6JWwsO57Lb
+Gm/RxbuZ/kG/13+lSNmZiyHrhj6hZhkAMeFM34fpT4+DBXqSxZuvdrmwBc5B2jYg
+F9Bv8gcmZlGhqONL23evr9Gu
+-----END PRIVATE KEY-----
diff --git a/test/integration/targets/incidental_setup_tls/files/client_certificate.pem b/test/integration/targets/incidental_setup_tls/files/client_certificate.pem
new file mode 100644
index 0000000000..501d83897f
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/client_certificate.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDRjCCAi6gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz
+MThaFw0yOTAxMDgwODMzMThaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz
+MQ8wDQYDVQQKDAZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCoM+OQ3HCnCUAAz9KGGTwWB9hQbUfAZXm/stlb2/uOAp3rNwxAlCs/giymBHE6
+Iu6mrK006Vn+Z9ibqIrD2LuCOxcu25y8goqG62TgdP5sa9wR+597s0XssnwnaY8y
+bJ3p2zWAJvMgqQ0iNW/ZynpWbO85K5SryUykF7FAeNU9ogGGlIwCPjHhPvnwjkqd
+yDqaA1VaJKDUWIF9joI7sV4VLgGhQvzXRrHULsTeIF2m0+ebL0PTNEWHQ0dtgLYX
+kW7YO4Y6+n3cjHNH4qTof8V30EK8pk8kTdJ/x6ubwf+klFCAyroOxNOaxUy299Oo
+yD6qIPJPnGkPhrKtWnWIhNzJAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz
+LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAK214+VVXnGnsUlvd9Q6A2Ea6UGrr6b7xkmlnIaNd+6xoUsDsHob
+srHYm7UC0uLi1KwSunI7AU5ZELVEUfAmJzh3O4d6C5sQyqKYPqd5harWOQ3BOD0I
+plHpp7qMtsPDuJBtmE/bmvF85eto0H7pPz+cTTXRlOaVVeiHjMggFcXdy1MzGo9C
+X/4wLQmsFeypTfe+ZGqvDh99VV+ffNMIsMh+opWEloaKiHmDKB6S9aC/MsVVM4RR
+nHm/UKTOukaGE9QIPkSSaygv3sBkVnQ2SHMvvtnjPHVHlizNoq6+YTnuOvKpo4o5
+V7Bij+W7rkBQLsEfwv2IC+gzmRz2yxr2tXk=
+-----END CERTIFICATE-----
diff --git a/test/integration/targets/incidental_setup_tls/files/client_key.pem b/test/integration/targets/incidental_setup_tls/files/client_key.pem
new file mode 100644
index 0000000000..850260a87f
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/client_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAqDPjkNxwpwlAAM/Shhk8FgfYUG1HwGV5v7LZW9v7jgKd6zcM
+QJQrP4IspgRxOiLupqytNOlZ/mfYm6iKw9i7gjsXLtucvIKKhutk4HT+bGvcEfuf
+e7NF7LJ8J2mPMmyd6ds1gCbzIKkNIjVv2cp6VmzvOSuUq8lMpBexQHjVPaIBhpSM
+Aj4x4T758I5Kncg6mgNVWiSg1FiBfY6CO7FeFS4BoUL810ax1C7E3iBdptPnmy9D
+0zRFh0NHbYC2F5Fu2DuGOvp93IxzR+Kk6H/Fd9BCvKZPJE3Sf8erm8H/pJRQgMq6
+DsTTmsVMtvfTqMg+qiDyT5xpD4ayrVp1iITcyQIDAQABAoIBAHPszzpXs4xr46Cr
+mvyxB6hnX76OkpUXWwGz0fptcsI9K3mhRuB7PhNXNE53YVIgITreZ8G/0jZ0e+VM
+E9dG2HS5JRE2ap/BmJfERJIuD+vJqrL6KMCondi0arz/E6I9GdjDK+xW69nmqRaa
+nawM0KQgD//m+WAsLJYrfg5hORZwI2SHaahawnCp0QaMmz3bdDWKRacM3q0UFX46
+Ze6CaZkUn+e1rHsTMcZBvxQWIVzysFNXh150idIB/PxL5YfCQqTSAj1c/nxaxz6a
+BvHFlpaYR3tvXXlexxfjglCwsGyckbvTyP1cBZqpv5oES+VKt2PrOve9Zyax+CYT
+0uQf6cECgYEA09+46QHXLfWh6jiJYu9skC9UrLU5czfCNB6PrUtFcjPFMYjZDcw9
+inJmcuTPXmfplxc47YDfpwotU+szTJDF+R8kknnfw9zVr/sIwZ5wsFfUQl/56Svn
+AIOVvHHvcvMX95XKGiuTsoCIJZNjJN3l3ztu/bRciuiVLyizglwIVrMCgYEAyzvK
+PFlWilbp3GPJlnW7x1bUxe1ziLE/Um+ujZx96+fy34hJLFdNdNzpNUjoOf3IDTGq
+6xl+vXcf12gimWMFcD3qNIGKHBDM9cIB2RDbb6YcqI8lOqopsmOyGmVLPkRpCoUK
+72kacQwvw6M9xjmpiG3dN8lE881jDmZi+hyCnJMCgYEAoIQnQAhP8Jbeo2dP1q+T
+bS0elnX532uH6xqYOW8EXwAPznZiEw0ANspzCWqGHHzXQMusKmtvhcq1CpXvWHt6
+MUHB4GMK/wVosxmZya5yq3bu7ZZu7JOBQCdwosMi6NB5AO7vnaIUFLFB9E3UWBLw
+243YicdCMU8B7yeD0ChPfPcCgYA1dYHKBBn+g8Q6Y8lIGaoOUmnfsok8gJtOfPAm
+ce6xmi7J29iboE9QmTeC+62Sa44u4ky6UNeE0QwAJnVLcb+hebfcneKNZWH0l1bT
+GVsPcFuDfzvkxZP4R782sERtmaMj0EFDHpuE9xatWIhMVyigKX4SSZAorXML+6S3
+c75rnwKBgBR+WU934wS+DbwTLlUB2mJWqJMEbOH/CUwPC7+VN4h1h3/i455iAeiU
+BizLS0SlD+MoSbC7URcZuquqGkmMlnJXoxF+NdxoWZK78tYNftryWoR87TloiVc/
+LhkxZxje4tgW/mTLqH3zKDoyyzDzG6Q6tAUN2ZTjJFEws7qF30Qe
+-----END RSA PRIVATE KEY-----
diff --git a/test/integration/targets/incidental_setup_tls/files/server_certificate.pem b/test/integration/targets/incidental_setup_tls/files/server_certificate.pem
new file mode 100644
index 0000000000..4a0ebc6ec0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/server_certificate.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDRjCCAi6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz
+MTZaFw0yOTAxMDgwODMzMTZaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz
+MQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQDIwErHwAesRBfd9HiZkmB3VYh28c1QkE9I8nYyHJKX2ZBUhAzK+h80BkcTJJ94
+265qWyACH/wl54Xe/ofFUFrGa4vz0qz4UkL/KI0OGw28Y4qnKdorb9DumbiIPB+9
+I9TJT9vhtXTxBNlBTpv3ONHL8EzdV6ZmuvELU11H27oQ4xoUYhfXPXLMLK0sOnXZ
+lt0BOMMd5fVpJVa8fvXiw3626a0aXCr4e/MWUsBFRnzrXfgoW+AjYoTjKKS2hLYo
+8//MM05h7ROIXrNe990sf9C1G+fOThmOMszK9sjMhu2xHranRcz5aA0UTfyOjTs8
+9WexUYhC5VorYyRWtVZu2mDjAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz
+LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAFoPBeB6tQhFS1198sia5NDHDDrghDOIlE0QbaoA+MSKzsaIy8Mu
+mNcM2ewYpT600XXTBxcqF6/vuKL9OEbvivtRYQu1YfkifN1jzREoWTieUkR5ytzt
+8ATfFkgTWJmiRiOIb/fNgewvhd+aKxep0OGwDiSKKl1ab6F17Cp4iK8sDBWmnUb6
+0Wf7pfver1Gl0Gp8vRXGUuc8a7udA9a8mV70HJlLkMdMvR9U8Bqih0+iRaqNWXRZ
+7Lc6v5LbzrW/ntilmgU6F0lwxPydg49MY4UrSXcjYLZs9T4iYHwTfLxFjFMIgGwn
+peYMKRj18akP9i2mjj5O2mRu4K+ecuUSOGI=
+-----END CERTIFICATE-----
diff --git a/test/integration/targets/incidental_setup_tls/files/server_key.pem b/test/integration/targets/incidental_setup_tls/files/server_key.pem
new file mode 100644
index 0000000000..c79ab64804
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/server_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAyMBKx8AHrEQX3fR4mZJgd1WIdvHNUJBPSPJ2MhySl9mQVIQM
+yvofNAZHEySfeNuualsgAh/8JeeF3v6HxVBaxmuL89Ks+FJC/yiNDhsNvGOKpyna
+K2/Q7pm4iDwfvSPUyU/b4bV08QTZQU6b9zjRy/BM3VemZrrxC1NdR9u6EOMaFGIX
+1z1yzCytLDp12ZbdATjDHeX1aSVWvH714sN+tumtGlwq+HvzFlLARUZ86134KFvg
+I2KE4yiktoS2KPP/zDNOYe0TiF6zXvfdLH/QtRvnzk4ZjjLMyvbIzIbtsR62p0XM
++WgNFE38jo07PPVnsVGIQuVaK2MkVrVWbtpg4wIDAQABAoIBAHw3wA3pnNXTLJGC
+fD1KfbZZjp9K76gyI10X6lsHow2i6dPiAah3LGecms4VkzfNdxcIW7303Kj3obZh
++ND277RnR6oPakgdXqdUCDP6OX2gemMFWqIWBkodhDmIOntmeHw4le4LwdiBD42B
+frBy0B5JCsbLPYPDmPNRGh8krvVS+Eir4hb4tK95TPMSL0vEjvHYFbCxv7//Ri1p
+3CROGp2CGX0WZ+Zs0crRNoIhRRM6kLAhROcqejtnEy6o7l5CWpCAL2vxlE9y8/kL
+iRawSZRFZnz/zGnqpx0vswgvijkuPfcNGMSzdwaiDgQz8D0GkJ7s9VgzZJazNy+1
+ET/4YIECgYEA612rwP9Ar9qdYbmmMPaJzITnaIrNGfO2JvaQqZt+DG8sVgdxL7V5
+D6emcw406drKRZvFAxnW6ZW2bVpmit02osl0re2A/nOTXLNuo338Qkap/hG8YZrF
+bw7w75pFa/rwlDtedjBnGHO2KbRXeU5Hn5wLoKjYgJoF6Ht+PPdL0IsCgYEA2lnC
+pQEhM51iRMDqNdmVJyvsTNU1ikoO8HaXHq+LwOQETaKMnDwp4Bn14E815CTulAc/
+tsDTKSDk6umZ+IufG1a2v7CqgKVwkB4HkgxKFQs2gQdTFfoMi5eeHR+njuNtklp1
+9fWfKHsP/ddrg+iTVTRZBLWexgKK89IMHYalpAkCgYEAy0Q3a9NF81mTJ+3kOE8C
+zO1OyLtuzGXsvxOb9c6C+owctyNwPeq05a89EgqH6hr5K0qOx9HOCCcyyJgVDQJl
+CAuByB/gkmAQOTQBbhMFA9vxPanljknTDsnRjKwoHkw2712ig+Hjd3ufK79C+FGB
+i7eBVzva1p2uUowshsxv3mcCgYAOFiRciMofjlO8o8V4W+Undcn02vxtQ4HbOYte
+S2z0sMEmUQpJOghpkMMwCWwsn8VUf3M40w/MY3bhQNjSFA/br6hyjW8yhXnRkl5i
+qbBN0z9c66AMlukgSFPHBTfGHB4Bhxx9Fa+C6Q2LDs6839BBevMTPrRTie509GQb
+s4gUIQKBgAvE8wLcmozno0GLDnBdKRZP/C7tmVnAINuraITPUBTASwI+Qo8ILigQ
+LRLaDqF84BEpjb8vdzkYFQqRQSZ8BI8NydfuKEFSBfL27sBvSGMYQJVm6bryUmPq
+T3ayaeZ4Wb3FFDijgtM9dRKyf7p4hQPOqM44QrntAtb43b2Q5L7M
+-----END RSA PRIVATE KEY-----
diff --git a/test/integration/targets/incidental_setup_tls/tasks/main.yml b/test/integration/targets/incidental_setup_tls/tasks/main.yml
new file mode 100644
index 0000000000..c5b7a23a22
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+# Generated certificate with: https://github.com/michaelklishin/tls-gen
+# ~/tls-gen/basic# make PASSWORD=bunnies CN=ansible.tls.tests
+# verify with: make info
+
+- name: ensure target directory is present
+ file:
+ path: /tls
+ state: directory
+
+- name: ensure TLS files are present
+ copy:
+ src: "{{ item }}"
+ dest: "/tls/{{ item }}"
+ loop:
+ - ca_certificate.pem
+ - ca_key.pem
+ - client_certificate.pem
+ - client_key.pem
+ - server_certificate.pem
+ - server_key.pem
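+
+# Editor's sketch (hypothetical, not part of the original role): the copied
+# pair can be sanity-checked by verifying the server certificate against the
+# bundled CA, which fails the play early if the files ever drift out of sync.
+- name: verify server certificate against the CA (sketch)
+ command: openssl verify -CAfile /tls/ca_certificate.pem /tls/server_certificate.pem
+ changed_when: no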
diff --git a/test/integration/targets/incidental_setup_zabbix/aliases b/test/integration/targets/incidental_setup_zabbix/aliases
new file mode 100644
index 0000000000..03121b3d06
--- /dev/null
+++ b/test/integration/targets/incidental_setup_zabbix/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/group1
+skip/osx
+skip/freebsd
+skip/rhel
+hidden
diff --git a/test/integration/targets/incidental_setup_zabbix/defaults/main.yml b/test/integration/targets/incidental_setup_zabbix/defaults/main.yml
new file mode 100644
index 0000000000..d6437a7568
--- /dev/null
+++ b/test/integration/targets/incidental_setup_zabbix/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+db_name: 'zabbix'
+db_user: 'zabbix'
+db_password: 'fLhijUs3PgekNhwJ'
+
+zabbix_version: 4.4
+zabbix_apt_repository: 'deb http://repo.zabbix.com/zabbix/{{ zabbix_version }}/{{ ansible_distribution.lower() }}/ {{ ansible_distribution_release }} main'
+zabbix_apt_repository_key: 'http://repo.zabbix.com/zabbix-official-repo.key'
+
+zabbix_packages:
+ - zabbix-server-mysql
+ - zabbix-frontend-php
+ - zabbix-apache-conf
diff --git a/test/integration/targets/incidental_setup_zabbix/handlers/main.yml b/test/integration/targets/incidental_setup_zabbix/handlers/main.yml
new file mode 100644
index 0000000000..a39556215a
--- /dev/null
+++ b/test/integration/targets/incidental_setup_zabbix/handlers/main.yml
@@ -0,0 +1,15 @@
+- name: remove zabbix repository
+ apt_repository:
+ repo: "{{ zabbix_apt_repository }}"
+ filename: zabbix
+ state: absent
+
+- name: remove zabbix packages
+ apt:
+ name: "{{ zabbix_packages }}"
+ state: absent
+
+- name: remove zabbix pip packages
+ pip:
+ name: zabbix-api
+ state: absent
diff --git a/test/integration/targets/incidental_setup_zabbix/meta/main.yml b/test/integration/targets/incidental_setup_zabbix/meta/main.yml
new file mode 100644
index 0000000000..bf750af390
--- /dev/null
+++ b/test/integration/targets/incidental_setup_zabbix/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_mysql_db
diff --git a/test/integration/targets/incidental_setup_zabbix/tasks/main.yml b/test/integration/targets/incidental_setup_zabbix/tasks/main.yml
new file mode 100644
index 0000000000..6d082fff21
--- /dev/null
+++ b/test/integration/targets/incidental_setup_zabbix/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: setup.yml
+ when: ansible_os_family == 'Debian'
diff --git a/test/integration/targets/incidental_setup_zabbix/tasks/setup.yml b/test/integration/targets/incidental_setup_zabbix/tasks/setup.yml
new file mode 100644
index 0000000000..2af51f9431
--- /dev/null
+++ b/test/integration/targets/incidental_setup_zabbix/tasks/setup.yml
@@ -0,0 +1,89 @@
+# sets up and starts Zabbix with default settings using a MySQL database.
+
+- name: install zabbix repository key
+ apt_key:
+ url: "{{ zabbix_apt_repository_key }}"
+ state: present
+
+- name: install zabbix repository
+ apt_repository:
+ repo: "{{ zabbix_apt_repository }}"
+ filename: zabbix
+ state: present
+ notify: remove zabbix repository
+
+- name: check if dpkg is configured to exclude specific paths
+ stat:
+ path: /etc/dpkg/dpkg.cfg.d/excludes
+ register: dpkg_excludes
+
+- name: ensure documentation installations are allowed for zabbix
+ lineinfile:
+ path: /etc/dpkg/dpkg.cfg.d/excludes
+ regexp: '^path-include=/usr/share/doc/zabbix\*$'
+ line: 'path-include=/usr/share/doc/zabbix*'
+ state: present
+ when: dpkg_excludes.stat.exists
+
+- name: install zabbix apt dependencies
+ apt:
+ name: "{{ zabbix_packages }}"
+ state: latest
+ update_cache: yes
+ notify: remove zabbix packages
+
+- name: install zabbix-api python package
+ pip:
+ name: zabbix-api
+ state: latest
+ notify: remove zabbix pip packages
+
+- name: create mysql user {{ db_user }}
+ mysql_user:
+ name: "{{ db_user }}"
+ password: "{{ db_password }}"
+ state: present
+ priv: "{{ db_name }}.*:ALL"
+ login_unix_socket: '{{ mysql_socket }}'
+
+- name: import initial zabbix database
+ mysql_db:
+ name: "{{ db_name }}"
+ login_user: "{{ db_user }}"
+ login_password: "{{ db_password }}"
+ state: import
+ target: /usr/share/doc/zabbix-server-mysql/create.sql.gz
+
+- name: deploy zabbix-server configuration
+ template:
+ src: zabbix_server.conf.j2
+ dest: /etc/zabbix/zabbix_server.conf
+ owner: root
+ group: zabbix
+ mode: 0640
+
+- name: deploy zabbix web frontend configuration
+ template:
+ src: zabbix.conf.php.j2
+ dest: /etc/zabbix/web/zabbix.conf.php
+ mode: 0644
+
+- name: Create proper run directory for zabbix-server
+ file:
+ path: /var/run/zabbix
+ state: directory
+ owner: zabbix
+ group: zabbix
+ mode: 0775
+
+- name: restart zabbix-server
+ service:
+ name: zabbix-server
+ state: restarted
+ enabled: yes
+
+- name: restart apache2
+ service:
+ name: apache2
+ state: restarted
+ enabled: yes
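+
+# Editor's sketch (hypothetical): after the restarts above, polling the
+# server's trapper port (10051, the same value as $ZBX_SERVER_PORT in the
+# frontend template) confirms that zabbix-server actually came up.
+- name: wait for zabbix-server to listen (sketch)
+ wait_for:
+ port: 10051
+ timeout: 60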
diff --git a/test/integration/targets/incidental_setup_zabbix/templates/zabbix.conf.php.j2 b/test/integration/targets/incidental_setup_zabbix/templates/zabbix.conf.php.j2
new file mode 100644
index 0000000000..ad0a8328d2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_zabbix/templates/zabbix.conf.php.j2
@@ -0,0 +1,20 @@
+<?php
+// Zabbix GUI configuration file
+global $DB;
+
+$DB['TYPE'] = 'MYSQL';
+$DB['SERVER'] = 'localhost';
+$DB['PORT'] = '0';
+$DB['DATABASE'] = '{{ db_name }}';
+$DB['USER'] = '{{ db_user }}';
+$DB['PASSWORD'] = '{{ db_password }}';
+
+// SCHEMA is relevant only for IBM_DB2 database
+$DB['SCHEMA'] = '';
+
+$ZBX_SERVER = 'localhost';
+$ZBX_SERVER_PORT = '10051';
+$ZBX_SERVER_NAME = '';
+
+$IMAGE_FORMAT_DEFAULT = IMAGE_FORMAT_PNG;
+?>
diff --git a/test/integration/targets/incidental_setup_zabbix/templates/zabbix_server.conf.j2 b/test/integration/targets/incidental_setup_zabbix/templates/zabbix_server.conf.j2
new file mode 100644
index 0000000000..f4c201af59
--- /dev/null
+++ b/test/integration/targets/incidental_setup_zabbix/templates/zabbix_server.conf.j2
@@ -0,0 +1,7 @@
+PidFile=/var/run/zabbix/zabbix_server.pid
+LogFile=/tmp/zabbix_server.log
+DBName={{ db_name }}
+DBUser={{ db_user }}
+DBPassword={{ db_password }}
+Timeout=4
+LogSlowQueries=3000
diff --git a/test/integration/targets/incidental_synchronize/aliases b/test/integration/targets/incidental_synchronize/aliases
new file mode 100644
index 0000000000..31c6a8b454
--- /dev/null
+++ b/test/integration/targets/incidental_synchronize/aliases
@@ -0,0 +1 @@
+shippable/posix/incidental
diff --git a/test/integration/targets/incidental_synchronize/files/bar.txt b/test/integration/targets/incidental_synchronize/files/bar.txt
new file mode 100644
index 0000000000..3e96db9b3e
--- /dev/null
+++ b/test/integration/targets/incidental_synchronize/files/bar.txt
@@ -0,0 +1 @@
+templated_var_loaded
diff --git a/test/integration/targets/incidental_synchronize/files/foo.txt b/test/integration/targets/incidental_synchronize/files/foo.txt
new file mode 100644
index 0000000000..3e96db9b3e
--- /dev/null
+++ b/test/integration/targets/incidental_synchronize/files/foo.txt
@@ -0,0 +1 @@
+templated_var_loaded
diff --git a/test/integration/targets/incidental_synchronize/tasks/main.yml b/test/integration/targets/incidental_synchronize/tasks/main.yml
new file mode 100644
index 0000000000..80e052a6a3
--- /dev/null
+++ b/test/integration/targets/incidental_synchronize/tasks/main.yml
@@ -0,0 +1,273 @@
+# test code for the synchronize module
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: install rsync
+ package:
+ name: rsync
+ when: ansible_distribution != "MacOSX"
+
+- name: cleanup old files
+ shell: rm -rf {{output_dir}}/*
+
+- name: create test new files
+ copy: dest={{output_dir}}/{{item}} mode=0644 content="hello world"
+ with_items:
+ - foo.txt
+ - bar.txt
+
+- name: synchronize file to new filename
+ synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
+- name: Synchronize using the mode=push param
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: push
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: push
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
+- name: Synchronize using the mode=pull param
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: pull
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: pull
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
+- name: synchronize files using with_items (issue#5965)
+ synchronize: src={{output_dir}}/{{item}} dest={{output_dir}}/{{item}}.result
+ with_items:
+ - foo.txt
+ - bar.txt
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed"
+ - "sync_result.msg == 'All items completed'"
+ - "'results' in sync_result"
+ - "sync_result.results|length == 2"
+ - "sync_result.results[0].msg.endswith('+ foo.txt\n')"
+ - "sync_result.results[1].msg.endswith('+ bar.txt\n')"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}.result"
+ with_items:
+ - foo.txt
+ - bar.txt
+
+- name: synchronize files using rsync_path (issue#7182)
+ synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.rsync_path rsync_path="sudo rsync"
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'rsync_path' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.rsync_path
+
+- name: add subdirectories for link-dest test
+ file:
+ path: "{{output_dir}}/{{item}}/"
+ state: directory
+ mode: 0755
+ with_items:
+ - directory_a
+ - directory_b
+
+- name: copy foo.txt into the first directory
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/{{item}}/foo.txt"
+ with_items:
+ - directory_a
+
+- name: synchronize files using link_dest
+ synchronize:
+ src: "{{output_dir}}/directory_a/foo.txt"
+ dest: "{{output_dir}}/directory_b/foo.txt"
+ link_dest:
+ - "{{output_dir}}/directory_a"
+ register: sync_result
+
+- name: get stat information for directory_a
+ stat:
+ path: "{{ output_dir }}/directory_a/foo.txt"
+ register: stat_result_a
+
+- name: get stat information for directory_b
+ stat:
+ path: "{{ output_dir }}/directory_b/foo.txt"
+ register: stat_result_b
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "stat_result_a.stat.inode == stat_result_b.stat.inode"
+
+- name: synchronize files using link_dest that would be recursive
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ link_dest:
+ - "{{output_dir}}"
+ register: sync_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - sync_result is not changed
+ - sync_result is failed
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - "directory_b/foo.txt"
+ - "directory_a/foo.txt"
+ - "directory_a"
+ - "directory_b"
diff --git a/test/integration/targets/incidental_timezone/aliases b/test/integration/targets/incidental_timezone/aliases
new file mode 100644
index 0000000000..78b47900ab
--- /dev/null
+++ b/test/integration/targets/incidental_timezone/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
diff --git a/test/integration/targets/incidental_timezone/tasks/main.yml b/test/integration/targets/incidental_timezone/tasks/main.yml
new file mode 100644
index 0000000000..91858918a2
--- /dev/null
+++ b/test/integration/targets/incidental_timezone/tasks/main.yml
@@ -0,0 +1,68 @@
+# Because hwclock usually isn't available inside Docker containers in Shippable,
+# these tasks detect whether hwclock works and run the hwclock tests only if it
+# is supported. That is why it is recommended to run these tests locally with
+# `--docker-privileged` on centos6, centos7 and ubuntu1404 images. Example
+# command to run on centos6:
+#
+# ansible-test integration --docker centos6 --docker-privileged -v timezone
+
+##
+## set path to timezone config files
+##
+
+- name: set config file path on Debian
+ set_fact:
+ timezone_config_file: '/etc/timezone'
+ when: ansible_os_family == 'Debian'
+
+- name: set config file path on RedHat
+ set_fact:
+ timezone_config_file: '/etc/sysconfig/clock'
+ when: ansible_os_family == 'RedHat'
+
+##
+## set path to hwclock config files
+##
+
+- name: set config file path on Debian
+ set_fact:
+ hwclock_config_file: '/etc/default/rcS'
+ when: ansible_os_family == 'Debian'
+
+- name: set config file path on RedHat
+ set_fact:
+ hwclock_config_file: '/etc/sysconfig/clock'
+ when: ansible_os_family == 'RedHat'
+
+####
+#### timezone tests
+####
+
+- name: make sure the dbus service is started under systemd
+ systemd:
+ name: dbus
+ state: started
+ when:
+ - ansible_service_mgr == 'systemd'
+ - ansible_distribution == 'Fedora'
+ - ansible_facts.distribution_major_version is version('31', '<')
+
+
+- name: Run tests
+ # Skip tests on Fedora 31 because dbus fails to start unless the container is run in privileged mode.
+ # Even then, it starts unreliably. This may be due to the move to cgroup v2 in Fedora 31.
+ # https://www.redhat.com/sysadmin/fedora-31-control-group-v2
+ when: ansible_facts.distribution ~ ansible_facts.distribution_major_version != 'Fedora31'
+ block:
+ - name: set timezone to Etc/UTC
+ timezone:
+ name: Etc/UTC
+ register: original_timezone
+
+ - block:
+ - include_tasks: test.yml
+ always:
+ - name: Restore original system timezone - {{ original_timezone.diff.before.name }}
+ timezone:
+ name: "{{ original_timezone.diff.before.name }}"
+ when: original_timezone is changed
diff --git a/test/integration/targets/incidental_timezone/tasks/test.yml b/test/integration/targets/incidental_timezone/tasks/test.yml
new file mode 100644
index 0000000000..ec0d854df0
--- /dev/null
+++ b/test/integration/targets/incidental_timezone/tasks/test.yml
@@ -0,0 +1,607 @@
+##
+## test setting timezone, idempotency and checkmode
+##
+
+- name: set timezone to Australia/Brisbane (checkmode)
+ timezone:
+ name: Australia/Brisbane
+ check_mode: yes
+ register: timezone_set_checkmode
+
+- name: ensure timezone reported as changed in checkmode
+ assert:
+ that:
+ - timezone_set_checkmode.changed
+ - timezone_set_checkmode.diff.after.name == 'Australia/Brisbane'
+ - timezone_set_checkmode.diff.before.name == 'Etc/UTC'
+
+- name: ensure checkmode didn't change the timezone
+ command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane
+ register: result
+ failed_when: result is not failed
+ changed_when: no
+
+- name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'RedHat'
+
+- name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^Etc/UTC' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'Debian'
+
+- name: set timezone to Australia/Brisbane
+ timezone:
+ name: Australia/Brisbane
+ register: timezone_set
+
+- name: ensure timezone changed
+ assert:
+ that:
+ - timezone_set.changed
+ - timezone_set.diff.after.name == 'Australia/Brisbane'
+ - timezone_set.diff.before.name == 'Etc/UTC'
+
+- name: ensure that the timezone is actually set
+ command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane
+ changed_when: no
+
+- name: ensure that the timezone is updated in the config file
+ command: egrep '^(TIME)?ZONE="Australia/Brisbane"' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'RedHat'
+
+- name: ensure that the timezone is updated in the config file
+ command: egrep '^Australia/Brisbane' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'Debian'
+
+- name: set timezone to Australia/Brisbane again
+ timezone:
+ name: Australia/Brisbane
+ register: timezone_again
+
+- name: ensure timezone idempotency
+ assert:
+ that:
+ - not timezone_again.changed
+
+- name: set timezone to Australia/Brisbane again in checkmode
+ timezone:
+ name: Australia/Brisbane
+ check_mode: yes
+ register: timezone_again_checkmode
+
+- name: ensure timezone idempotency (checkmode)
+ assert:
+ that:
+ - not timezone_again_checkmode.changed
+
+##
+## tests for same timezones with different names
+##
+
+- name: check dpkg-reconfigure
+ shell: type dpkg-reconfigure
+ register: check_dpkg_reconfigure
+ ignore_errors: yes
+ changed_when: no
+
+- name: check timedatectl
+ shell: type timedatectl && timedatectl
+ register: check_timedatectl
+ ignore_errors: yes
+ changed_when: no
+
+- block:
+ - name: set timezone to Etc/UTC
+ timezone:
+ name: Etc/UTC
+
+ - name: change timezone from Etc/UTC to UTC
+ timezone:
+ name: UTC
+ register: timezone_etcutc_to_utc
+
+ - name: check timezone changed from Etc/UTC to UTC
+ assert:
+ that:
+ - timezone_etcutc_to_utc.changed
+ - timezone_etcutc_to_utc.diff.before.name == 'Etc/UTC'
+ - timezone_etcutc_to_utc.diff.after.name == 'UTC'
+
+ - name: change timezone from UTC to Etc/UTC
+ timezone:
+ name: Etc/UTC
+ register: timezone_utc_to_etcutc
+
+ - name: check timezone changed from UTC to Etc/UTC
+ assert:
+ that:
+ - timezone_utc_to_etcutc.changed
+ - timezone_utc_to_etcutc.diff.before.name == 'UTC'
+ - timezone_utc_to_etcutc.diff.after.name == 'Etc/UTC'
+
+ when:
+ # FIXME: Due to a bug in dpkg-reconfigure, these tests fail on non-systemd Debian
+ - check_dpkg_reconfigure.rc != 0 or check_timedatectl.rc == 0
+
+##
+## no systemd tests for timezone
+##
+
+- block:
+ ##
+ ## test with empty config file
+ ##
+
+ - name: empty config file
+ command: cp /dev/null {{ timezone_config_file }}
+
+ - name: set timezone to Europe/Belgrade (empty config file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_empty_conf
+
+ - name: check if timezone set (empty config file)
+ assert:
+ that:
+ - timezone_empty_conf.changed
+ - timezone_empty_conf.diff.after.name == 'Europe/Belgrade'
+ - timezone_empty_conf.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (empty config file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with deleted config file
+ ##
+
+ - name: remove config file
+ file:
+ path: '{{ timezone_config_file }}'
+ state: absent
+
+ - name: set timezone to Europe/Belgrade (no config file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_missing_conf
+
+ - name: check if timezone set (no config file)
+ assert:
+ that:
+ - timezone_missing_conf.changed
+ - timezone_missing_conf.diff.after.name == 'Europe/Belgrade'
+ - timezone_missing_conf.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (no config file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime as symbolic link to a zoneinfo file
+ ##
+
+ - name: create symlink /etc/localtime -> /usr/share/zoneinfo/Etc/UTC
+ file:
+ src: /usr/share/zoneinfo/Etc/UTC
+ dest: /etc/localtime
+ state: link
+ force: yes
+
+ - name: set timezone to Europe/Belgrade (over symlink)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_symlink
+
+ - name: check if timezone set (over symlink)
+ assert:
+ that:
+ - timezone_symlink.changed
+ - timezone_symlink.diff.after.name == 'Europe/Belgrade'
+ - timezone_symlink.diff.before.name == 'Etc/UTC'
+
+ - name: check if the timezone is actually set (over symlink)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime as broken symbolic link
+ ##
+
+ - name: set timezone to a broken symlink
+ file:
+ src: /tmp/foo
+ dest: /etc/localtime
+ state: link
+ force: yes
+
+ - name: set timezone to Europe/Belgrade (over broken symlink)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_symlink_broken
+
+ - name: check if timezone set (over broken symlink)
+ assert:
+ that:
+ - timezone_symlink_broken.changed
+ - timezone_symlink_broken.diff.after.name == 'Europe/Belgrade'
+ - timezone_symlink_broken.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (over broken symlink)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime set manually using copy
+ ##
+
+ - name: set timezone manually by copying a zone info file to /etc/localtime
+ copy:
+ src: /usr/share/zoneinfo/Etc/UTC
+ dest: /etc/localtime
+ remote_src: yes
+
+ - name: set timezone to Europe/Belgrade (over copied file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_copied
+
+ - name: check if timezone set (over copied file)
+ assert:
+ that:
+ - timezone_copied.changed
+ - timezone_copied.diff.after.name == 'Europe/Belgrade'
+ - timezone_copied.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (over copied file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+ when:
+ - ansible_service_mgr != 'systemd'
+ - timezone_config_file is defined
+
+
+####
+#### hwclock tests
+####
+
+- name: check if hwclock is supported in the environment
+ command: hwclock --test
+ register: hwclock_test
+ ignore_errors: yes
+
+- name: check if timedatectl works in the environment
+ command: timedatectl
+ register: timedatectl_test
+ ignore_errors: yes
+
+- name: determine whether hwclock is supported in this environment
+ set_fact:
+ hwclock_supported: '{{ hwclock_test is successful or timedatectl_test is successful }}'
+##
+## test set hwclock, idempotency and checkmode
+##
+
+- block:
+ - name: set hwclock to local
+ timezone:
+ hwclock: local
+
+ - name: set hwclock to UTC (checkmode)
+ timezone:
+ hwclock: UTC
+ check_mode: yes
+ register: hwclock_set_checkmode
+
+ - name: ensure hwclock reported as changed (checkmode)
+ assert:
+ that:
+ - hwclock_set_checkmode.changed
+ - hwclock_set_checkmode.diff.after.hwclock == 'UTC'
+ - hwclock_set_checkmode.diff.before.hwclock == 'local'
+
+ - block:
+ - name: ensure that checkmode didn't update hwclock in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+ register: result
+ failed_when: result is not failed
+
+ - name: ensure that checkmode didn't update hwclock the config file
+ command: grep ^UTC=no {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set hwclock to UTC
+ timezone:
+ hwclock: UTC
+ register: hwclock_set
+
+ - name: ensure hwclock changed
+ assert:
+ that:
+ - hwclock_set.changed
+ - hwclock_set.diff.after.hwclock == 'UTC'
+ - hwclock_set.diff.before.hwclock == 'local'
+
+ - block:
+ - name: ensure that hwclock is updated in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+
+ - name: ensure that hwclock is updated in the config file
+ command: grep ^UTC=yes {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set hwclock to UTC again
+ timezone:
+ hwclock: UTC
+ register: hwclock_again
+
+ - name: ensure hwclock idempotency
+ assert:
+ that:
+ - not hwclock_again.changed
+
+ - name: set hwclock to UTC again (checkmode)
+ timezone:
+ hwclock: UTC
+ check_mode: yes
+ register: hwclock_again_checkmode
+
+ - name: ensure hwclock idempotency (checkmode)
+ assert:
+ that:
+ - not hwclock_again_checkmode.changed
+
+
+ ##
+ ## no systemd tests for hwclock
+ ##
+
+ - block:
+ ##
+ ## test set hwclock with both /etc/adjtime and conf file deleted
+ ##
+
+ - name: remove /etc/adjtime and conf file
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/adjtime
+ - '{{ hwclock_config_file }}'
+
+ - name: set hwclock to UTC with deleted /etc/adjtime and conf file
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_adjtime_and_conf
+
+ - name: ensure hwclock changed with deleted /etc/adjtime and conf
+ assert:
+ that:
+ - hwclock_set_utc_deleted_adjtime_and_conf.changed
+ - hwclock_set_utc_deleted_adjtime_and_conf.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_adjtime_and_conf.diff.before.hwclock == 'n/a'
+
+
+ ##
+ ## test set hwclock with /etc/adjtime deleted
+ ##
+
+ - name: remove /etc/adjtime
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/adjtime
+
+ - name: set hwclock to UTC with deleted /etc/adjtime
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_adjtime_utc
+
+ - name: ensure hwclock reports no change with deleted /etc/adjtime
+ assert:
+ that:
+ - not hwclock_set_utc_deleted_adjtime_utc.changed
+ - hwclock_set_utc_deleted_adjtime_utc.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_adjtime_utc.diff.before.hwclock == 'UTC'
+
+ - name: set hwclock to LOCAL with deleted /etc/adjtime
+ timezone:
+ hwclock: local
+ register: hwclock_set_local_deleted_adjtime_local
+
+ - name: ensure hwclock changed to LOCAL with deleted /etc/adjtime
+ assert:
+ that:
+ - hwclock_set_local_deleted_adjtime_local.changed
+ - hwclock_set_local_deleted_adjtime_local.diff.after.hwclock == 'local'
+ - hwclock_set_local_deleted_adjtime_local.diff.before.hwclock == 'UTC'
+
+
+ ##
+ ## test set hwclock with conf file deleted
+ ##
+
+ - name: remove conf file
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ hwclock_config_file }}'
+
+ - name: set hwclock to UTC with deleted conf
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_conf
+
+ - name: ensure hwclock changed with deleted conf file
+ assert:
+ that:
+ - hwclock_set_utc_deleted_conf.changed
+ - hwclock_set_utc_deleted_conf.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_conf.diff.before.hwclock == 'n/a'
+
+
+ ##
+ ## test set hwclock with /etc/adjtime missing UTC/LOCAL strings
+ ##
+
+ - name: create /etc/adjtime without UTC/LOCAL
+ copy:
+ content: "0.0 0 0\n0"
+ dest: /etc/adjtime
+
+ - name: set hwclock to UTC with broken /etc/adjtime
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_broken_adjtime
+
+ - name: ensure hwclock doesn't report changed with broken /etc/adjtime
+ assert:
+ that:
+ - not hwclock_set_utc_broken_adjtime.changed
+ - hwclock_set_utc_broken_adjtime.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_broken_adjtime.diff.before.hwclock == 'UTC'
+
+ - name: set hwclock to LOCAL with broken /etc/adjtime
+ timezone:
+ hwclock: local
+ register: hwclock_set_local_broken_adjtime
+
+ - name: ensure hwclock changed to LOCAL with broken /etc/adjtime
+ assert:
+ that:
+ - hwclock_set_local_broken_adjtime.changed
+ - hwclock_set_local_broken_adjtime.diff.after.hwclock == 'local'
+ - hwclock_set_local_broken_adjtime.diff.before.hwclock == 'UTC'
+ when:
+ - ansible_service_mgr != 'systemd'
+ - hwclock_config_file is defined
+
+ ####
+ #### timezone + hwclock tests
+ ####
+
+ ##
+ ## test set timezone and hwclock, idempotency and checkmode
+ ##
+
+ - name: set timezone to Etc/UTC and hwclock to local
+ timezone:
+ name: Etc/UTC
+ hwclock: local
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC (checkmode)
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ check_mode: yes
+ register: tzclock_set_checkmode
+
+ - name: ensure timezone and hwclock reported as changed in checkmode
+ assert:
+ that:
+ - tzclock_set_checkmode.changed
+ - tzclock_set_checkmode.diff.after.name == 'Europe/Belgrade'
+ - tzclock_set_checkmode.diff.before.name == 'Etc/UTC'
+ - tzclock_set_checkmode.diff.after.hwclock == 'UTC'
+ - tzclock_set_checkmode.diff.before.hwclock == 'local'
+
+ - name: ensure checkmode didn't change the timezone
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ register: result
+ failed_when: result is not failed
+ changed_when: no
+
+ - block:
+ - name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'RedHat'
+
+ - name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^Etc/UTC' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'Debian'
+
+ - name: ensure that checkmode didn't update hwclock in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+ register: result
+ failed_when: result is not failed
+
+ - name: ensure that checkmode didn't update hwclock the config file
+ command: grep ^UTC=no {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set
+
+ - name: ensure timezone and hwclock changed
+ assert:
+ that:
+ - tzclock_set.changed
+ - tzclock_set.diff.after.name == 'Europe/Belgrade'
+ - tzclock_set.diff.before.name == 'Etc/UTC'
+ - tzclock_set.diff.after.hwclock == 'UTC'
+ - tzclock_set.diff.before.hwclock == 'local'
+
+ - name: ensure that the timezone is actually set
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+ - block:
+ - name: ensure that the timezone is updated in the config file
+ command: egrep '^(TIME)?ZONE="Europe/Belgrade"' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'RedHat'
+
+ - name: ensure that the timezone is updated in the config file
+ command: egrep '^Europe/Belgrade' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'Debian'
+
+ - name: ensure that hwclock is updated in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+
+ - name: ensure that hwclock is updated in the config file
+ command: grep ^UTC=yes {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC again
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set_again
+
+ - name: ensure timezone and hwclock idempotency
+ assert:
+ that:
+ - not tzclock_set_again.changed
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC again (checkmode)
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ check_mode: yes
+ register: tzclock_set_again_checkmode
+
+ - name: ensure timezone and hwclock idempotency in checkmode
+ assert:
+ that:
+ - not tzclock_set_again_checkmode.changed
+
+ when:
+ - ansible_system == 'Linux'
+ - hwclock_supported
diff --git a/test/integration/targets/incidental_ufw/aliases b/test/integration/targets/incidental_ufw/aliases
new file mode 100644
index 0000000000..ff7ad7ce4a
--- /dev/null
+++ b/test/integration/targets/incidental_ufw/aliases
@@ -0,0 +1,11 @@
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/freebsd
+skip/rhel8.0
+skip/rhel8.0b
+skip/rhel8.1b
+skip/docker
+needs/root
+destructive
+needs/target/setup_epel
diff --git a/test/integration/targets/incidental_ufw/tasks/main.yml b/test/integration/targets/incidental_ufw/tasks/main.yml
new file mode 100644
index 0000000000..28198cd600
--- /dev/null
+++ b/test/integration/targets/incidental_ufw/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+# Make sure ufw is installed
+- name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+ when: ansible_distribution == 'RedHat'
+- name: Install iptables (SuSE only)
+ package:
+ name: iptables
+ become: yes
+ when: ansible_os_family == 'Suse'
+- name: Install ufw
+ become: yes
+ package:
+ name: ufw
+
+# Run the tests
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+ become: yes
+
+ # Cleanup
+ always:
+ - pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+ - name: Reset ufw to factory defaults and disable
+ ufw:
+ state: reset
diff --git a/test/integration/targets/incidental_ufw/tasks/run-test.yml b/test/integration/targets/incidental_ufw/tasks/run-test.yml
new file mode 100644
index 0000000000..e9c5d2929c
--- /dev/null
+++ b/test/integration/targets/incidental_ufw/tasks/run-test.yml
@@ -0,0 +1,21 @@
+---
+- pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+- name: Reset ufw to factory defaults
+ ufw:
+ state: reset
+- name: Disable ufw
+ ufw:
+ # Some versions of ufw have a bug where reset does not also disable the
+ # firewall. That's why we explicitly deactivate here. See
+ # https://bugs.launchpad.net/ufw/+bug/1810082
+ state: disabled
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
+- name: Reset to factory defaults
+ ufw:
+ state: reset
diff --git a/test/integration/targets/incidental_ufw/tasks/tests/basic.yml b/test/integration/targets/incidental_ufw/tasks/tests/basic.yml
new file mode 100644
index 0000000000..3c625112f3
--- /dev/null
+++ b/test/integration/targets/incidental_ufw/tasks/tests/basic.yml
@@ -0,0 +1,402 @@
+---
+# ############################################
+- name: Make sure it is off
+ ufw:
+ state: disabled
+- name: Enable (check mode)
+ ufw:
+ state: enabled
+ check_mode: yes
+ register: enable_check
+- name: Enable
+ ufw:
+ state: enabled
+ register: enable
+- name: Enable (idempotency)
+ ufw:
+ state: enabled
+ register: enable_idem
+- name: Enable (idempotency, check mode)
+ ufw:
+ state: enabled
+ check_mode: yes
+ register: enable_idem_check
+- assert:
+ that:
+ - enable_check is changed
+ - enable is changed
+ - enable_idem is not changed
+ - enable_idem_check is not changed
+
+# ############################################
+- name: ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: yes
+ register: ipv4_allow_check
+- name: ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow
+- name: ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow_idem
+- name: ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: yes
+ register: ipv4_allow_idem_check
+- assert:
+ that:
+ - ipv4_allow_check is changed
+ - ipv4_allow is changed
+ - ipv4_allow_idem is not changed
+ - ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ check_mode: yes
+ register: delete_ipv4_allow_check
+- name: delete ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ register: delete_ipv4_allow
+- name: delete ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ register: delete_ipv4_allow_idem
+- name: delete ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ check_mode: yes
+ register: delete_ipv4_allow_idem_check
+- assert:
+ that:
+ - delete_ipv4_allow_check is changed
+ - delete_ipv4_allow is changed
+ - delete_ipv4_allow_idem is not changed
+ - delete_ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: yes
+ register: ipv6_allow_check
+- name: ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow
+- name: ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow_idem
+- name: ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: yes
+ register: ipv6_allow_idem_check
+- assert:
+ that:
+ - ipv6_allow_check is changed
+ - ipv6_allow is changed
+ - ipv6_allow_idem is not changed
+ - ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ check_mode: yes
+ register: delete_ipv6_allow_check
+- name: delete ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ register: delete_ipv6_allow
+- name: delete ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ register: delete_ipv6_allow_idem
+- name: delete ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ check_mode: yes
+ register: delete_ipv6_allow_idem_check
+- assert:
+ that:
+ - delete_ipv6_allow_check is changed
+ - delete_ipv6_allow is changed
+ - delete_ipv6_allow_idem is not changed
+ - delete_ipv6_allow_idem_check is not changed
+
+
+# ############################################
+- name: ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: yes
+ register: ipv4_allow_check
+- name: ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow
+- name: ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow_idem
+- name: ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: yes
+ register: ipv4_allow_idem_check
+- assert:
+ that:
+ - ipv4_allow_check is changed
+ - ipv4_allow is changed
+ - ipv4_allow_idem is not changed
+ - ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ check_mode: yes
+ register: delete_ipv4_allow_check
+- name: delete ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ register: delete_ipv4_allow
+- name: delete ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ register: delete_ipv4_allow_idem
+- name: delete ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ check_mode: yes
+ register: delete_ipv4_allow_idem_check
+- assert:
+ that:
+ - delete_ipv4_allow_check is changed
+ - delete_ipv4_allow is changed
+ - delete_ipv4_allow_idem is not changed
+ - delete_ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: yes
+ register: ipv6_allow_check
+- name: ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow
+- name: ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow_idem
+- name: ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: yes
+ register: ipv6_allow_idem_check
+- assert:
+ that:
+ - ipv6_allow_check is changed
+ - ipv6_allow is changed
+ - ipv6_allow_idem is not changed
+ - ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ check_mode: yes
+ register: delete_ipv6_allow_check
+- name: delete ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ register: delete_ipv6_allow
+- name: delete ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ register: delete_ipv6_allow_idem
+- name: delete ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ check_mode: yes
+ register: delete_ipv6_allow_idem_check
+- assert:
+ that:
+ - delete_ipv6_allow_check is changed
+ - delete_ipv6_allow is changed
+ - delete_ipv6_allow_idem is not changed
+ - delete_ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: Reload ufw
+ ufw:
+ state: reloaded
+ register: reload
+- name: Reload ufw (check mode)
+ ufw:
+ state: reloaded
+ check_mode: yes
+ register: reload_check
+- assert:
+ that:
+ - reload is changed
+ - reload_check is changed
+
+# ############################################
+- name: Disable (check mode)
+ ufw:
+ state: disabled
+ check_mode: yes
+ register: disable_check
+- name: Disable
+ ufw:
+ state: disabled
+ register: disable
+- name: Disable (idempotency)
+ ufw:
+ state: disabled
+ register: disable_idem
+- name: Disable (idempotency, check mode)
+ ufw:
+ state: disabled
+ check_mode: yes
+ register: disable_idem_check
+- assert:
+ that:
+ - disable_check is changed
+ - disable is changed
+ - disable_idem is not changed
+ - disable_idem_check is not changed
+
+# ############################################
+- name: Re-enable
+ ufw:
+ state: enabled
+- name: Reset (check mode)
+ ufw:
+ state: reset
+ check_mode: yes
+ register: reset_check
+- pause:
+ # Should not be needed, but since ufw is ignoring --dry-run for reset
+ # (https://bugs.launchpad.net/ufw/+bug/1810082) we have to wait here as well.
+ seconds: 1
+- name: Reset
+ ufw:
+ state: reset
+ register: reset
+- pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+- name: Reset (idempotency)
+ ufw:
+ state: reset
+ register: reset_idem
+- pause:
+ # Should not be needed, but since ufw is ignoring --dry-run for reset
+ # (https://bugs.launchpad.net/ufw/+bug/1810082) we have to wait here as well.
+ seconds: 1
+- name: Reset (idempotency, check mode)
+ ufw:
+ state: reset
+ check_mode: yes
+ register: reset_idem_check
+- assert:
+ that:
+ - reset_check is changed
+ - reset is changed
+ - reset_idem is changed
+ - reset_idem_check is changed
diff --git a/test/integration/targets/incidental_ufw/tasks/tests/global-state.yml b/test/integration/targets/incidental_ufw/tasks/tests/global-state.yml
new file mode 100644
index 0000000000..69b2cde938
--- /dev/null
+++ b/test/integration/targets/incidental_ufw/tasks/tests/global-state.yml
@@ -0,0 +1,150 @@
+---
+- name: Enable ufw
+ ufw:
+ state: enabled
+
+# ############################################
+- name: Make sure logging is off
+ ufw:
+ logging: no
+- name: Logging (check mode)
+ ufw:
+ logging: yes
+ check_mode: yes
+ register: logging_check
+- name: Logging
+ ufw:
+ logging: yes
+ register: logging
+- name: Get logging
+ shell: |
+ ufw status verbose | grep "^Logging:"
+ register: ufw_logging
+ environment:
+ LC_ALL: C
+- name: Logging (idempotency)
+ ufw:
+ logging: yes
+ register: logging_idem
+- name: Logging (idempotency, check mode)
+ ufw:
+ logging: yes
+ check_mode: yes
+ register: logging_idem_check
+- name: Logging (change, check mode)
+ ufw:
+ logging: full
+ check_mode: yes
+ register: logging_change_check
+- name: Logging (change)
+ ufw:
+ logging: full
+ register: logging_change
+- name: Get logging
+ shell: |
+ ufw status verbose | grep "^Logging:"
+ register: ufw_logging_change
+ environment:
+ LC_ALL: C
+- assert:
+ that:
+ - logging_check is changed
+ - logging is changed
+ - "ufw_logging.stdout == 'Logging: on (low)'"
+ - logging_idem is not changed
+ - logging_idem_check is not changed
+ - "ufw_logging_change.stdout == 'Logging: on (full)'"
+ - logging_change is changed
+ - logging_change_check is changed
+
+# ############################################
+- name: Default (check mode)
+ ufw:
+ default: reject
+ direction: incoming
+ check_mode: yes
+ register: default_check
+- name: Default
+ ufw:
+ default: reject
+ direction: incoming
+ register: default
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults
+ environment:
+ LC_ALL: C
+- name: Default (idempotency)
+ ufw:
+ default: reject
+ direction: incoming
+ register: default_idem
+- name: Default (idempotency, check mode)
+ ufw:
+ default: reject
+ direction: incoming
+ check_mode: yes
+ register: default_idem_check
+- name: Default (change, check mode)
+ ufw:
+ default: allow
+ direction: incoming
+ check_mode: yes
+ register: default_change_check
+- name: Default (change)
+ ufw:
+ default: allow
+ direction: incoming
+ register: default_change
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults_change
+ environment:
+ LC_ALL: C
+- name: Default (change again)
+ ufw:
+ default: deny
+ direction: incoming
+ register: default_change_2
+- name: Default (change incoming implicitly, check mode)
+ ufw:
+ default: allow
+ check_mode: yes
+ register: default_change_implicit_check
+- name: Default (change incoming implicitly)
+ ufw:
+ default: allow
+ register: default_change_implicit
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults_change_implicit
+ environment:
+ LC_ALL: C
+- name: Default (change incoming implicitly, idempotent, check mode)
+ ufw:
+ default: allow
+ check_mode: yes
+ register: default_change_implicit_idem_check
+- name: Default (change incoming implicitly, idempotent)
+ ufw:
+ default: allow
+ register: default_change_implicit_idem
+- assert:
+ that:
+ - default_check is changed
+ - default is changed
+ - "'reject (incoming)' in ufw_defaults.stdout"
+ - default_idem is not changed
+ - default_idem_check is not changed
+ - default_change_check is changed
+ - default_change is changed
+ - "'allow (incoming)' in ufw_defaults_change.stdout"
+ - default_change_2 is changed
+ - default_change_implicit_check is changed
+ - default_change_implicit is changed
+ - default_change_implicit_idem_check is not changed
+ - default_change_implicit_idem is not changed
+ - "'allow (incoming)' in ufw_defaults_change_implicit.stdout"
diff --git a/test/integration/targets/incidental_ufw/tasks/tests/insert_relative_to.yml b/test/integration/targets/incidental_ufw/tasks/tests/insert_relative_to.yml
new file mode 100644
index 0000000000..3bb44a0e27
--- /dev/null
+++ b/test/integration/targets/incidental_ufw/tasks/tests/insert_relative_to.yml
@@ -0,0 +1,80 @@
+---
+- name: Enable
+ ufw:
+ state: enabled
+ register: enable
+
+# ## CREATE RULES ############################
+- name: ipv4
+ ufw:
+ rule: deny
+ port: 22
+ to_ip: 0.0.0.0
+- name: ipv4
+ ufw:
+ rule: deny
+ port: 23
+ to_ip: 0.0.0.0
+
+- name: ipv6
+ ufw:
+ rule: deny
+ port: 122
+ to_ip: "::"
+- name: ipv6
+ ufw:
+ rule: deny
+ port: 123
+ to_ip: "::"
+
+- name: first-ipv4
+ ufw:
+ rule: deny
+ port: 10
+ to_ip: 0.0.0.0
+ insert: 0
+ insert_relative_to: first-ipv4
+- name: last-ipv4
+ ufw:
+ rule: deny
+ port: 11
+ to_ip: 0.0.0.0
+ insert: 0
+ insert_relative_to: last-ipv4
+
+- name: first-ipv6
+ ufw:
+ rule: deny
+ port: 110
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+- name: last-ipv6
+ ufw:
+ rule: deny
+ port: 111
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: last-ipv6
+
+# ## CHECK RESULT ############################
+- name: Get rules
+ shell: |
+ ufw status | grep DENY | cut -f 1-2 -d ' ' | grep -E "^(0\.0\.0\.0|::) [123]+"
+ # Note that there was also a rule "ff02::fb mDNS" on at least one CI run;
+ # to ignore such rules, the extra filtering (grepping for DENY and the
+ # regex) removes every rule that was not added here.
+ register: ufw_status
+- assert:
+ that:
+ - ufw_status.stdout_lines == expected_stdout
+ vars:
+ expected_stdout:
+ - "0.0.0.0 10"
+ - "0.0.0.0 22"
+ - "0.0.0.0 11"
+ - "0.0.0.0 23"
+ - ":: 110"
+ - ":: 122"
+ - ":: 111"
+ - ":: 123"
diff --git a/test/integration/targets/incidental_ufw/tasks/tests/interface.yml b/test/integration/targets/incidental_ufw/tasks/tests/interface.yml
new file mode 100644
index 0000000000..776a72f879
--- /dev/null
+++ b/test/integration/targets/incidental_ufw/tasks/tests/interface.yml
@@ -0,0 +1,81 @@
+- name: Enable
+ ufw:
+ state: enabled
+
+- name: Route with interface in and out
+ ufw:
+ rule: allow
+ route: yes
+ interface_in: foo
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ to_ip: 8.8.8.8
+ from_port: 1111
+ to_port: 2222
+
+- name: Route with interface in
+ ufw:
+ rule: allow
+ route: yes
+ interface_in: foo
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+
+- name: Route with interface out
+ ufw:
+ rule: allow
+ route: yes
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+
+- name: Non-route with interface in
+ ufw:
+ rule: allow
+ interface_in: foo
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 3333
+
+- name: Non-route with interface out
+ ufw:
+ rule: allow
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 4444
+
+- name: Check result
+ shell: ufw status | grep -E '(ALLOW|DENY|REJECT|LIMIT)' | sed -E 's/[ \t]+/ /g'
+ register: ufw_status
+
+- assert:
+ that:
+ - '"8.8.8.8 2222/tcp on bar ALLOW FWD 1.1.1.1 1111/tcp on foo " in stdout'
+ - '"Anywhere ALLOW FWD 1.1.1.1 1111/tcp on foo " in stdout'
+ - '"Anywhere on bar ALLOW FWD 1.1.1.1 1111/tcp " in stdout'
+ - '"Anywhere on foo ALLOW 1.1.1.1 3333/tcp " in stdout'
+ - '"Anywhere ALLOW OUT 1.1.1.1 4444/tcp on bar " in stdout'
+ vars:
+ stdout: '{{ ufw_status.stdout_lines }}'
+
+- name: Non-route with interface_in and interface_out
+ ufw:
+ rule: allow
+ interface_in: foo
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+ to_ip: 8.8.8.8
+ to_port: 2222
+ ignore_errors: yes
+ register: ufw_non_route_iface
+
+- assert:
+ that:
+ - ufw_non_route_iface is failed
+ - '"Only route rules" in ufw_non_route_iface.msg'
diff --git a/test/integration/targets/incidental_x509_crl/aliases b/test/integration/targets/incidental_x509_crl/aliases
new file mode 100644
index 0000000000..54b54aa59a
--- /dev/null
+++ b/test/integration/targets/incidental_x509_crl/aliases
@@ -0,0 +1,4 @@
+x509_crl_info
+shippable/posix/incidental
+destructive
+skip/aix
diff --git a/test/integration/targets/incidental_x509_crl/meta/main.yml b/test/integration/targets/incidental_x509_crl/meta/main.yml
new file mode 100644
index 0000000000..0b241de3cc
--- /dev/null
+++ b/test/integration/targets/incidental_x509_crl/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_openssl
diff --git a/test/integration/targets/incidental_x509_crl/tasks/impl.yml b/test/integration/targets/incidental_x509_crl/tasks/impl.yml
new file mode 100644
index 0000000000..eafb2dad2b
--- /dev/null
+++ b/test/integration/targets/incidental_x509_crl/tasks/impl.yml
@@ -0,0 +1,289 @@
+---
+- name: Create CRL 1 (check mode)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl1.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: 20191013000000Z
+ next_update: 20191113000000Z
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-1.pem'
+ revocation_date: 20191013000000Z
+ - path: '{{ output_dir }}/cert-2.pem'
+ revocation_date: 20191013000000Z
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ revocation_date: 20191001000000Z
+ check_mode: yes
+ register: crl_1_check
+- name: Create CRL 1
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl1.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: 20191013000000Z
+ next_update: 20191113000000Z
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-1.pem'
+ revocation_date: 20191013000000Z
+ - path: '{{ output_dir }}/cert-2.pem'
+ revocation_date: 20191013000000Z
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ revocation_date: 20191001000000Z
+ register: crl_1
+- name: Retrieve CRL 1 infos
+ x509_crl_info:
+ path: '{{ output_dir }}/ca-crl1.crl'
+ register: crl_1_info_1
+- name: Retrieve CRL 1 infos via file content
+ x509_crl_info:
+ content: '{{ lookup("file", output_dir ~ "/ca-crl1.crl") }}'
+ register: crl_1_info_2
+- name: Create CRL 1 (idempotent, check mode)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl1.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: 20191013000000Z
+ next_update: 20191113000000Z
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-1.pem'
+ revocation_date: 20191013000000Z
+ - path: '{{ output_dir }}/cert-2.pem'
+ revocation_date: 20191013000000Z
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ revocation_date: 20191001000000Z
+ check_mode: yes
+ register: crl_1_idem_check
+- name: Create CRL 1 (idempotent)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl1.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: 20191013000000Z
+ next_update: 20191113000000Z
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-1.pem'
+ revocation_date: 20191013000000Z
+ - path: '{{ output_dir }}/cert-2.pem'
+ revocation_date: 20191013000000Z
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ revocation_date: 20191001000000Z
+ register: crl_1_idem
+- name: Create CRL 1 (idempotent with content, check mode)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl1.crl'
+ privatekey_content: "{{ lookup('file', output_dir ~ '/ca.key') }}"
+ issuer:
+ CN: Ansible
+ last_update: 20191013000000Z
+ next_update: 20191113000000Z
+ revoked_certificates:
+ - content: "{{ lookup('file', output_dir ~ '/cert-1.pem') }}"
+ revocation_date: 20191013000000Z
+ - content: "{{ lookup('file', output_dir ~ '/cert-2.pem') }}"
+ revocation_date: 20191013000000Z
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ revocation_date: 20191001000000Z
+ check_mode: yes
+ register: crl_1_idem_content_check
+- name: Create CRL 1 (idempotent with content)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl1.crl'
+ privatekey_content: "{{ lookup('file', output_dir ~ '/ca.key') }}"
+ issuer:
+ CN: Ansible
+ last_update: 20191013000000Z
+ next_update: 20191113000000Z
+ revoked_certificates:
+ - content: "{{ lookup('file', output_dir ~ '/cert-1.pem') }}"
+ revocation_date: 20191013000000Z
+ - content: "{{ lookup('file', output_dir ~ '/cert-2.pem') }}"
+ revocation_date: 20191013000000Z
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ revocation_date: 20191001000000Z
+ register: crl_1_idem_content
+
+- name: Create CRL 2 (check mode)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-1.pem'
+ - path: '{{ output_dir }}/cert-2.pem'
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ check_mode: yes
+ register: crl_2_check
+- name: Create CRL 2
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-1.pem'
+ - path: '{{ output_dir }}/cert-2.pem'
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ register: crl_2
+- name: Create CRL 2 (idempotent, check mode)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-1.pem'
+ - path: '{{ output_dir }}/cert-2.pem'
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ ignore_timestamps: yes
+ check_mode: yes
+ register: crl_2_idem_check
+- name: Create CRL 2 (idempotent)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-1.pem'
+ - path: '{{ output_dir }}/cert-2.pem'
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ - serial_number: 1234
+ ignore_timestamps: yes
+ register: crl_2_idem
+- name: Create CRL 2 (idempotent update, check mode)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - serial_number: 1235
+ ignore_timestamps: yes
+ mode: update
+ check_mode: yes
+ register: crl_2_idem_update_change_check
+- name: Create CRL 2 (idempotent update)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - serial_number: 1235
+ ignore_timestamps: yes
+ mode: update
+ register: crl_2_idem_update_change
+- name: Create CRL 2 (idempotent update, check mode)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-2.pem'
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ ignore_timestamps: yes
+ mode: update
+ check_mode: yes
+ register: crl_2_idem_update_check
+- name: Create CRL 2 (idempotent update)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-2.pem'
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ ignore_timestamps: yes
+ mode: update
+ register: crl_2_idem_update
+- name: Create CRL 2 (changed timestamps, check mode)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-2.pem'
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ ignore_timestamps: no
+ mode: update
+ check_mode: yes
+ register: crl_2_change_check
+- name: Create CRL 2 (changed timestamps)
+ x509_crl:
+ path: '{{ output_dir }}/ca-crl2.crl'
+ privatekey_path: '{{ output_dir }}/ca.key'
+ issuer:
+ CN: Ansible
+ last_update: +0d
+ next_update: +0d
+ revoked_certificates:
+ - path: '{{ output_dir }}/cert-2.pem'
+ reason: key_compromise
+ reason_critical: yes
+ invalidity_date: 20191012000000Z
+ ignore_timestamps: no
+ mode: update
+ return_content: yes
+ register: crl_2_change
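
In the CRL 2 tasks, mode: update merges the listed entries into the existing CRL instead of replacing its contents, and ignore_timestamps: yes keeps the relative last_update/next_update values from counting as a change on their own; the final "changed timestamps" pair flips that switch to prove the opposite. A minimal sketch, with illustrative paths and serial number, that appends one entry without rewriting the rest:

- name: Append a serial to an existing CRL
  x509_crl:
    path: /etc/pki/ca-crl.crl
    privatekey_path: /etc/pki/ca.key
    issuer:
      CN: Ansible
    last_update: +0d
    next_update: +30d
    revoked_certificates:
      - serial_number: 4711
    mode: update
    ignore_timestamps: yes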
diff --git a/test/integration/targets/incidental_x509_crl/tasks/main.yml b/test/integration/targets/incidental_x509_crl/tasks/main.yml
new file mode 100644
index 0000000000..1f82ff9e1b
--- /dev/null
+++ b/test/integration/targets/incidental_x509_crl/tasks/main.yml
@@ -0,0 +1,83 @@
+---
+- set_fact:
+ certificates:
+ - name: ca
+ subject:
+ commonName: Ansible
+ is_ca: yes
+ - name: ca-2
+ subject:
+ commonName: Ansible Other CA
+ is_ca: yes
+ - name: cert-1
+ subject_alt_name:
+ - DNS:ansible.com
+ - name: cert-2
+ subject_alt_name:
+ - DNS:example.com
+ - name: cert-3
+ subject_alt_name:
+ - DNS:example.org
+ - IP:1.2.3.4
+ - name: cert-4
+ subject_alt_name:
+ - DNS:test.ansible.com
+ - DNS:b64.ansible.com
+
+- name: Generate private keys
+ openssl_privatekey:
+ path: '{{ output_dir }}/{{ item.name }}.key'
+ type: ECC
+ curve: secp256r1
+ loop: "{{ certificates }}"
+
+- name: Generate CSRs
+ openssl_csr:
+ path: '{{ output_dir }}/{{ item.name }}.csr'
+ privatekey_path: '{{ output_dir }}/{{ item.name }}.key'
+ subject: "{{ item.subject | default(omit) }}"
+ subject_alt_name: "{{ item.subject_alt_name | default(omit) }}"
+ basic_constraints: "{{ 'CA:TRUE' if item.is_ca | default(false) else omit }}"
+ use_common_name_for_san: no
+ loop: "{{ certificates }}"
+
+- name: Generate CA certificates
+ openssl_certificate:
+ path: '{{ output_dir }}/{{ item.name }}.pem'
+ csr_path: '{{ output_dir }}/{{ item.name }}.csr'
+ privatekey_path: '{{ output_dir }}/{{ item.name }}.key'
+ provider: selfsigned
+ loop: "{{ certificates }}"
+ when: item.is_ca | default(false)
+
+- name: Generate other certificates
+ openssl_certificate:
+ path: '{{ output_dir }}/{{ item.name }}.pem'
+ csr_path: '{{ output_dir }}/{{ item.name }}.csr'
+ provider: ownca
+ ownca_path: '{{ output_dir }}/ca.pem'
+ ownca_privatekey_path: '{{ output_dir }}/ca.key'
+ loop: "{{ certificates }}"
+ when: not (item.is_ca | default(false))
+
+- name: Get certificate infos
+ openssl_certificate_info:
+ path: '{{ output_dir }}/{{ item }}.pem'
+ loop:
+ - cert-1
+ - cert-2
+ - cert-3
+ - cert-4
+ register: certificate_infos
+
+- block:
+ - name: Running tests with cryptography backend
+ include_tasks: impl.yml
+ vars:
+ select_crypto_backend: cryptography
+
+ - import_tasks: ../tests/validate.yml
+ vars:
+ select_crypto_backend: cryptography
+
+ when: cryptography_version.stdout is version('1.2', '>=')
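
The final when: relies on cryptography_version, a fact registered by the incidental_setup_openssl dependency declared in meta/main.yml. A hedged sketch of how such a probe can be registered (the setup role's actual task may differ):

- name: Probe the cryptography version (sketch)
  command: "{{ ansible_python_interpreter }} -c 'import cryptography; print(cryptography.__version__)'"
  register: cryptography_version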
diff --git a/test/integration/targets/incidental_x509_crl/tests/validate.yml b/test/integration/targets/incidental_x509_crl/tests/validate.yml
new file mode 100644
index 0000000000..17b31f34ad
--- /dev/null
+++ b/test/integration/targets/incidental_x509_crl/tests/validate.yml
@@ -0,0 +1,61 @@
+---
+- name: Validate CRL 1
+ assert:
+ that:
+ - crl_1_check is changed
+ - crl_1 is changed
+ - crl_1_idem_check is not changed
+ - crl_1_idem is not changed
+ - crl_1_idem_content_check is not changed
+ - crl_1_idem_content is not changed
+
+- name: Validate CRL 1 info
+ assert:
+ that:
+ - crl_1_info_1 == crl_1_info_2
+ - crl_1_info_1.digest == 'ecdsa-with-SHA256'
+ - crl_1_info_1.issuer | length == 1
+ - crl_1_info_1.issuer.commonName == 'Ansible'
+ - crl_1_info_1.issuer_ordered | length == 1
+ - crl_1_info_1.last_update == '20191013000000Z'
+ - crl_1_info_1.next_update == '20191113000000Z'
+ - crl_1_info_1.revoked_certificates | length == 3
+ - crl_1_info_1.revoked_certificates[0].invalidity_date is none
+ - crl_1_info_1.revoked_certificates[0].invalidity_date_critical == false
+ - crl_1_info_1.revoked_certificates[0].issuer is none
+ - crl_1_info_1.revoked_certificates[0].issuer_critical == false
+ - crl_1_info_1.revoked_certificates[0].reason is none
+ - crl_1_info_1.revoked_certificates[0].reason_critical == false
+ - crl_1_info_1.revoked_certificates[0].revocation_date == '20191013000000Z'
+ - crl_1_info_1.revoked_certificates[0].serial_number == certificate_infos.results[0].serial_number
+ - crl_1_info_1.revoked_certificates[1].invalidity_date == '20191012000000Z'
+ - crl_1_info_1.revoked_certificates[1].invalidity_date_critical == false
+ - crl_1_info_1.revoked_certificates[1].issuer is none
+ - crl_1_info_1.revoked_certificates[1].issuer_critical == false
+ - crl_1_info_1.revoked_certificates[1].reason == 'key_compromise'
+ - crl_1_info_1.revoked_certificates[1].reason_critical == true
+ - crl_1_info_1.revoked_certificates[1].revocation_date == '20191013000000Z'
+ - crl_1_info_1.revoked_certificates[1].serial_number == certificate_infos.results[1].serial_number
+ - crl_1_info_1.revoked_certificates[2].invalidity_date is none
+ - crl_1_info_1.revoked_certificates[2].invalidity_date_critical == false
+ - crl_1_info_1.revoked_certificates[2].issuer is none
+ - crl_1_info_1.revoked_certificates[2].issuer_critical == false
+ - crl_1_info_1.revoked_certificates[2].reason is none
+ - crl_1_info_1.revoked_certificates[2].reason_critical == false
+ - crl_1_info_1.revoked_certificates[2].revocation_date == '20191001000000Z'
+ - crl_1_info_1.revoked_certificates[2].serial_number == 1234
+
+- name: Validate CRL 2
+ assert:
+ that:
+ - crl_2_check is changed
+ - crl_2 is changed
+ - crl_2_idem_check is not changed
+ - crl_2_idem is not changed
+ - crl_2_idem_update_change_check is changed
+ - crl_2_idem_update_change is changed
+ - crl_2_idem_update_check is not changed
+ - crl_2_idem_update is not changed
+ - crl_2_change_check is changed
+ - crl_2_change is changed
+ - crl_2_change.crl == lookup('file', output_dir ~ '/ca-crl2.crl', rstrip=False)
diff --git a/test/integration/targets/incidental_xml/aliases b/test/integration/targets/incidental_xml/aliases
new file mode 100644
index 0000000000..b9ab365826
--- /dev/null
+++ b/test/integration/targets/incidental_xml/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/incidental
+skip/aix
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml
new file mode 100644
index 0000000000..d0e3e39af4
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Толстый бар</name>
+ <beers>
+ <beer>Окское</beer>
+ <beer>Невское</beer>
+ </beers>
+ <rating subjective="да">десять</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tolstyybar.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml
new file mode 100644
index 0000000000..5afc797414
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml
new file mode 100644
index 0000000000..61747d4bbb
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml b/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml
new file mode 100644
index 0000000000..525330c217
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Окское</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-elements.xml b/test/integration/targets/incidental_xml/results/test-add-children-elements.xml
new file mode 100644
index 0000000000..f9ff25176a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml b/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml
new file mode 100644
index 0000000000..565ba402b6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Natty Lite</beer><beer>Miller Lite</beer><beer>Coors Lite</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml b/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml
new file mode 100644
index 0000000000..8da9633636
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Old Rasputin</beer>
+ <beer>Old Motor Oil</beer>
+ <beer>Old Curmudgeon</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml b/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml
new file mode 100644
index 0000000000..c409e54bfa
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>Old Rasputin</beer>
+ <beer>Old Motor Oil</beer>
+ <beer>Old Curmudgeon</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml
new file mode 100644
index 0000000000..374652244f
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer name="Окское" type="экстра"/></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml
new file mode 100644
index 0000000000..5a3907f6f2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer name="Ansible Brew" type="light"/></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml b/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml
new file mode 100644
index 0000000000..fa1ddfca2f
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml
@@ -0,0 +1,32 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer color="red">George Killian's Irish Red</beer>
+ <beer origin="CZ" color="blonde">Pilsner Urquell</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ <validxhtml validateon=""/>
+ </website>
+ <phonenumber>555-555-1234</phonenumber>
+ <owner dob="1976-04-12">
+ <name>
+ <last>Smith</last>
+ <first>John</first>
+ <middle>Q</middle>
+ </name>
+ </owner>
+ <website_bis>
+ <validxhtml validateon=""/>
+ </website_bis>
+ <testnormalelement>xml tag with no special characters</testnormalelement>
+ <test-with-dash>xml tag with dashes</test-with-dash>
+ <test-with-dash.and.dot>xml tag with dashes and dots</test-with-dash.and.dot>
+ <test-with.dash_and.dot_and-underscores>xml tag with dashes, dots and underscores</test-with.dash_and.dot_and-underscores>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml b/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml
new file mode 100644
index 0000000000..3d27e8aa3c
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer></beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml b/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml
new file mode 100644
index 0000000000..f47909ac69
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-pretty-print.xml b/test/integration/targets/incidental_xml/results/test-pretty-print.xml
new file mode 100644
index 0000000000..b5c38262fd
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-pretty-print.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-remove-attribute.xml b/test/integration/targets/incidental_xml/results/test-remove-attribute.xml
new file mode 100644
index 0000000000..8a621cf144
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating>10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-element.xml b/test/integration/targets/incidental_xml/results/test-remove-element.xml
new file mode 100644
index 0000000000..454d905cd4
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml b/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml
new file mode 100644
index 0000000000..732a0ed224
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml b/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml
new file mode 100644
index 0000000000..16df98e201
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml
new file mode 100644
index 0000000000..de3bc3f600
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="нет">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml b/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml
new file mode 100644
index 0000000000..143fe7bf4e
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="false">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml
new file mode 100644
index 0000000000..0ef2b7e6e6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer alcohol="0.5" name="90 Minute IPA"><Water liter="0.2" quantity="200g"/><Starch quantity="10g"/><Hops quantity="50g"/><Yeast quantity="20g"/></beer><beer alcohol="0.3" name="Harvest Pumpkin Ale"><Water liter="0.2" quantity="200g"/><Hops quantity="25g"/><Yeast quantity="20g"/></beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml
new file mode 100644
index 0000000000..f19d53566a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Окское</beer><beer>Невское</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements.xml
new file mode 100644
index 0000000000..be313a5a8d
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>90 Minute IPA</beer><beer>Harvest Pumpkin Ale</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml b/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml
new file mode 100644
index 0000000000..785beb645d
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address></address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml
new file mode 100644
index 0000000000..734fe6dbf1
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">пять</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+<rating>пять</rating></business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value.xml b/test/integration/targets/incidental_xml/results/test-set-element-value.xml
new file mode 100644
index 0000000000..fc97ec3bed
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">5</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+<rating>5</rating></business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml b/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml
new file mode 100644
index 0000000000..44abda43f0
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="false">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml b/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml
new file mode 100644
index 0000000000..0cc8a79e39
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">11</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/tasks/main.yml b/test/integration/targets/incidental_xml/tasks/main.yml
new file mode 100644
index 0000000000..9b8f2c3678
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/main.yml
@@ -0,0 +1,67 @@
+- name: Gather facts
+ setup:
+
+- name: Install lxml (FreeBSD)
+ package:
+ name: '{{ "py27-lxml" if ansible_python.version.major == 2 else "py36-lxml" }}'
+ state: present
+ when: ansible_os_family == "FreeBSD"
+
+# Needed for macOS!
+- name: Install lxml
+ pip:
+ name: lxml
+ state: present
+# when: ansible_os_family == "Darwin"
+
+- name: Get lxml version
+ command: "{{ ansible_python_interpreter }} -c 'from lxml import etree; print(\".\".join(str(v) for v in etree.LXML_VERSION))'"
+ register: lxml_version
+
+- name: Set lxml capabilities as variables
+ set_fact:
+ # NOTE: Some tests require predictable element attribute order,
+ # which is only guaranteed starting from lxml v3.0alpha1
+ lxml_predictable_attribute_order: '{{ lxml_version.stdout is version("3", ">=") }}'
+
+ # NOTE: The xml module requires at least lxml v2.3.0
+ lxml_xpath_attribute_result_attrname: '{{ lxml_version.stdout is version("2.3.0", ">=") }}'
+
+- name: Only run the tests when lxml is v2.3.0 or newer
+ when: lxml_xpath_attribute_result_attrname
+ block:
+
+ - include_tasks: test-add-children-elements.yml
+ - include_tasks: test-add-children-from-groupvars.yml
+ - include_tasks: test-add-children-insertafter.yml
+ - include_tasks: test-add-children-insertbefore.yml
+ - include_tasks: test-add-children-with-attributes.yml
+ - include_tasks: test-add-element-implicitly.yml
+ - include_tasks: test-count.yml
+ - include_tasks: test-mutually-exclusive-attributes.yml
+ - include_tasks: test-remove-attribute.yml
+ - include_tasks: test-remove-element.yml
+ - include_tasks: test-set-attribute-value.yml
+ - include_tasks: test-set-children-elements.yml
+ - include_tasks: test-set-children-elements-level.yml
+ - include_tasks: test-set-element-value.yml
+ - include_tasks: test-set-element-value-empty.yml
+ - include_tasks: test-pretty-print.yml
+ - include_tasks: test-pretty-print-only.yml
+ - include_tasks: test-add-namespaced-children-elements.yml
+ - include_tasks: test-remove-namespaced-attribute.yml
+ - include_tasks: test-set-namespaced-attribute-value.yml
+ - include_tasks: test-set-namespaced-element-value.yml
+ - include_tasks: test-set-namespaced-children-elements.yml
+ - include_tasks: test-get-element-content.yml
+ - include_tasks: test-xmlstring.yml
+ - include_tasks: test-children-elements-xml.yml
+
+ # Unicode tests
+ - include_tasks: test-add-children-elements-unicode.yml
+ - include_tasks: test-add-children-with-attributes-unicode.yml
+ - include_tasks: test-set-attribute-value-unicode.yml
+ - include_tasks: test-count-unicode.yml
+ - include_tasks: test-get-element-content.yml
+ - include_tasks: test-set-children-elements-unicode.yml
+ - include_tasks: test-set-element-value-unicode.yml
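
The gating facts above combine a one-off interpreter probe with Jinja's version test, a pattern that transfers to any optional Python dependency. A compact sketch (threshold and included file are illustrative):

- name: Probe the lxml release version (sketch)
  command: "{{ ansible_python_interpreter }} -c 'import lxml.etree; print(lxml.etree.__version__)'"
  register: probe

- name: Run feature-dependent tasks only when new enough
  include_tasks: feature-tests.yml   # hypothetical task file
  when: probe.stdout is version('3.0', '>=')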
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml
new file mode 100644
index 0000000000..8ad91501c3
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Окское
+ register: add_children_elements_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_elements_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml
new file mode 100644
index 0000000000..8d9b06866d
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ register: add_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
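
Every per-feature file in this target ends with the same comparison trick: copying the golden result over the generated file in check mode with diff: yes, so a comparison that reports no change proves the two files are byte-identical. The idiom in isolation (paths illustrative):

- name: Compare generated output to the golden file
  copy:
    src: results/expected.xml
    dest: /tmp/generated.xml
  check_mode: yes
  diff: yes
  register: comparison

- assert:
    that:
      - comparison is not changed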
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml
new file mode 100644
index 0000000000..e062de8d14
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children: '{{ bad_beers }}'
+ register: add_children_from_groupvars
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-from-groupvars.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_from_groupvars.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml
new file mode 100644
index 0000000000..2d42e2d54e
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertafter: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+ pretty_print: yes
+ register: add_children_insertafter
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertafter.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_insertafter.changed == true
+ - comparison.changed == false # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml
new file mode 100644
index 0000000000..8550f12cf7
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+ pretty_print: yes
+ register: add_children_insertbefore
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertbefore.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_insertbefore.changed == true
+ - comparison.changed == false # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml
new file mode 100644
index 0000000000..d4a2329f69
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml
@@ -0,0 +1,31 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer:
+ name: Окское
+ type: экстра
+ register: add_children_with_attributes_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_with_attributes_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml
new file mode 100644
index 0000000000..91e92637fc
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml
@@ -0,0 +1,35 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer:
+ name: Ansible Brew
+ type: light
+ register: add_children_with_attributes
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # NOTE: This test may fail if lxml does not support predictable element attribute order,
+ # so we skip the comparison on those platforms (e.g. CentOS 6).
+ # The module still works fine; we simply cannot compare as rigorously as we would like.
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_with_attributes.changed == true
+ - comparison.changed == false # identical
+ when: lxml_predictable_attribute_order
+ #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml b/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml
new file mode 100644
index 0000000000..db674ba4fc
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml
@@ -0,0 +1,237 @@
+---
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers-implicit.xml
+
+
+- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 1/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name/last
+ value: Smith
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 2/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name/first
+ value: John
+
+- name: Add a validxhtml element to the website element. Note that ensure defaults to present, and value defaults to null for elements, so the element is created empty.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website/validxhtml/@validateon
+
+- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website_bis/validxhtml/@validateon
+
+- name: Add an attribute with a value
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/@dob='1976-04-12'
+
+- name: Add an element with a value, alternate syntax
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer/text()="George Killian's Irish Red" # note the single quote inside the XPath string
+
+- name: Add an element without special characters
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/testnormalelement
+ value: xml tag with no special characters
+ pretty_print: yes
+
+- name: Add an element with dash
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash
+ value: xml tag with dashes
+ pretty_print: yes
+
+- name: Add an element with dot
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash.and.dot
+ value: xml tag with dashes and dots
+ pretty_print: yes
+
+- name: Add an element with underscore
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with.dash_and.dot_and-underscores
+ value: xml tag with dashes, dots and underscores
+ pretty_print: yes
+
+- name: Add an attribute on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer[text()="George Killian's Irish Red"]/@color='red'
+
+- name: Add two attributes on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer[text()="Pilsner Urquell" and @origin='CZ']/@color='blonde'
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name[first/text()='John']/middle
+ value: Q
+
+- name: Pretty Print this!
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ pretty_print: yes
+
+- name: Compare to expected result
+ copy:
+ src: results/test-add-element-implicitly.yml
+ dest: /tmp/ansible-xml-beers-implicit.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+- name: Test expected result
+ assert:
+ that:
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-element-implicitly.yml /tmp/ansible-xml-beers-implicit.xml
+
+
+# Now we repeat the same, just to ensure proper use of namespaces
+- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:phonenumber
+ value: 555-555-1234
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 1/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name/a:last
+ value: Smith
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 2/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name/a:first
+ value: John
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add a validxhtml element to the website element. Note that ensure defaults to present, and value defaults to null for elements, so the element is created empty.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website/a:validxhtml
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website/a:validxhtml/@a:validateon
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an empty validateon attribute to the validxhtml element. This one actually exercises the implicit parent-node creation behavior, since website_bis does not exist yet.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website_bis/a:validxhtml/@a:validateon
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an attribute with a value
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/@a:dob='1976-04-12'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with a value, alternate syntax
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+    xpath: /business/a:beers/a:beer/text()="George Killian's Irish Red" # note the apostrophe inside a double-quoted XPath string
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an attribute on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer[text()="George Killian's Irish Red"]/@a:color='red'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add two attributes on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer[text()="Pilsner Urquell" and @a:origin='CZ']/@a:color='blonde'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name[a:first/text()='John']/a:middle
+ value: Q
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element without special characters
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/testnormalelement
+ value: xml tag with no special characters
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+
+- name: Add an element with dash
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash
+ value: xml tag with dashes
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with dot
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash.and.dot
+ value: xml tag with dashes and dots
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with underscore
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with.dash_and.dot_and-underscores
+ value: xml tag with dashes, dots and underscores
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Pretty Print this!
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ pretty_print: yes
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml
new file mode 100644
index 0000000000..25eca47f5b
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Add namespaced child element
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ add_children:
+ - beer: Old Rasputin
+ register: add_namespaced_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-namespaced-children-elements.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_namespaced_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml b/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml
new file mode 100644
index 0000000000..f4a3c3d582
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml
@@ -0,0 +1,30 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element with xml format
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ input_type: xml
+ add_children:
+ - '<beer>Old Rasputin</beer>'
+ register: children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+     diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
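+
+# For comparison, the same child expressed with the default input_type (yaml)
+# would be a plain mapping rather than a raw XML string -- a minimal sketch:
+#
+#   add_children:
+#   - beer: Old Rasputin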
diff --git a/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml
new file mode 100644
index 0000000000..47a806bf98
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml
@@ -0,0 +1,19 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
+
+
+ - name: Count child element
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: beers
+
+ - name: Test expected result
+ assert:
+ that:
+ - beers.changed == false
+ - beers.count == 2
diff --git a/test/integration/targets/incidental_xml/tasks/test-count.yml b/test/integration/targets/incidental_xml/tasks/test-count.yml
new file mode 100644
index 0000000000..cbc97e323c
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-count.yml
@@ -0,0 +1,19 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: beers
+
+ - name: Test expected result
+ assert:
+ that:
+ - beers.changed == false
+ - beers.count == 3
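+
+# A minimal usage sketch: with count: yes the module only reports how many
+# nodes matched the xpath and leaves the file untouched (hence changed is
+# false above). The count is read from the registered result, e.g.:
+#
+#   - debug:
+#       msg: "Found {{ beers.count }} beer elements"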
diff --git a/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml
new file mode 100644
index 0000000000..73ae96674f
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
+
+
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute.changed == false
+ - get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да'
+
+ - name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_text.changed == false
+ - get_element_text.matches[0]['rating'] == 'десять'
diff --git a/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml b/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml
new file mode 100644
index 0000000000..58ca7767e7
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml
@@ -0,0 +1,52 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute.changed == false
+ - get_element_attribute.matches[0]['rating'] is defined
+ - get_element_attribute.matches[0]['rating']['subjective'] == 'true'
+
+ # TODO: Remove this in Ansible v2.12 when this incorrect use of attribute is deprecated
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ attribute: subjective
+ register: get_element_attribute_wrong
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute_wrong.changed == false
+ - get_element_attribute_wrong.matches[0]['rating'] is defined
+ - get_element_attribute_wrong.matches[0]['rating']['subjective'] == 'true'
+ - get_element_attribute_wrong.deprecations is defined
+ - get_element_attribute_wrong.deprecations[0].msg == "Parameter 'attribute=subjective' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry."
+ - get_element_attribute_wrong.deprecations[0].version == '2.12'
+
+ - name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_text.changed == false
+ - get_element_text.matches[0]['rating'] == '10'
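+
+# Shape of the registered results, as implied by the asserts above:
+#   content: attribute -> matches[0] == {'rating': {'subjective': 'true'}}
+#   content: text      -> matches[0] == {'rating': '10'}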
diff --git a/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml b/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml
new file mode 100644
index 0000000000..3f24b0ac84
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml
@@ -0,0 +1,22 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Specify both children to add and a value
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ add_children:
+ - child01
+ - child02
+ value: conflict!
+ register: module_output
+ ignore_errors: yes
+
+ - name: Test expected result
+ assert:
+ that:
+ - module_output.changed == false
+ - module_output.failed == true
diff --git a/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml b/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml
new file mode 100644
index 0000000000..7c0f7d5fd6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml.orig
+
+ - name: Remove spaces from test fixture
+ shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml
+
+ - name: Pretty print without modification
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ pretty_print: yes
+ register: pretty_print_only
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-pretty-print-only.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - pretty_print_only.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml b/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml
new file mode 100644
index 0000000000..88b618b25d
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml
@@ -0,0 +1,30 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Pretty print
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ pretty_print: yes
+ add_children:
+ - beer: Old Rasputin
+ register: pretty_print
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-pretty-print.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - pretty_print.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml b/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml
new file mode 100644
index 0000000000..9aa395e666
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove '/business/rating/@subjective'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating/@subjective
+ state: absent
+ register: remove_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_attribute.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-element.yml b/test/integration/targets/incidental_xml/tasks/test-remove-element.yml
new file mode 100644
index 0000000000..f2e20ea220
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-element.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove '/business/rating'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ state: absent
+ register: remove_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_element.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml
new file mode 100644
index 0000000000..36682b2202
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml
@@ -0,0 +1,33 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove namespaced '/bus:business/rat:rating/@attr:subjective'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating/@attr:subjective
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+      - remove_namespaced_attribute.changed == true
+      - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml
new file mode 100644
index 0000000000..be78af6803
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml
@@ -0,0 +1,33 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove namespaced '/bus:business/rat:rating'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_element.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml
new file mode 100644
index 0000000000..dabf72a1b7
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/rating/@subjective' to 'нет'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: нет
+ register: set_attribute_value_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_attribute_value_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml
new file mode 100644
index 0000000000..2aa39fe22f
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/rating/@subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: 'false'
+ register: set_attribute_value
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_attribute_value.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml
new file mode 100644
index 0000000000..3e2c0adb6f
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml
@@ -0,0 +1,74 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer:
+ alcohol: "0.5"
+ name: 90 Minute IPA
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Starch:
+ quantity: 10g
+ - Hops:
+ quantity: 50g
+ - Yeast:
+ quantity: 20g
+ - beer:
+ alcohol: "0.3"
+ name: Harvest Pumpkin Ale
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Hops:
+ quantity: 25g
+ - Yeast:
+ quantity: 20g
+ register: set_children_elements_level
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements_level.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
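+
+# The YAML anchor (&children) and alias (*children) feed the exact same child
+# tree to both tasks; the second run asserting changed == false is what
+# demonstrates that set_children is idempotent.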
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml
new file mode 100644
index 0000000000..240b894ac7
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml
@@ -0,0 +1,46 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: Окское
+ - beer: Невское
+ register: set_children_elements_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+   xml:
+     path: /tmp/ansible-xml-beers.xml
+     xpath: /business/beers
+     set_children: *children
+   register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml
new file mode 100644
index 0000000000..7b0f3247ad
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml
@@ -0,0 +1,53 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml
new file mode 100644
index 0000000000..5814803cb7
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/website/address' to empty string.
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/website/address
+ value: ''
+ register: set_element_value_empty
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-empty.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_value_empty.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml
new file mode 100644
index 0000000000..c3a40b7d93
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml
@@ -0,0 +1,43 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add 2nd '/business/rating' with value 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
+ - rating: пять
+
+ - name: Set '/business/rating' to 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_first_run
+
+ - name: Set '/business/rating' to 'пять'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml
new file mode 100644
index 0000000000..dbd070f139
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml
@@ -0,0 +1,43 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add 2nd '/business/rating' with value '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
+ - rating: '5'
+
+ - name: Set '/business/rating' to '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_first_run
+
+ - name: Set '/business/rating' to '5'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml
new file mode 100644
index 0000000000..e0086efe3a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml
@@ -0,0 +1,34 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ attribute: attr:subjective
+ value: 'false'
+ register: set_namespaced_attribute_value
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-attribute-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_namespaced_attribute_value.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml
new file mode 100644
index 0000000000..8e66e70eeb
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml
@@ -0,0 +1,57 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers-xml.xml
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers-xml.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+
+ - name: Copy state after first set_children
+ copy:
+     src: /tmp/ansible-xml-namespaced-beers-xml.xml
+     dest: /tmp/ansible-xml-namespaced-beers-1.xml
+     remote_src: yes
+
+ - name: Set child elements again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers-xml.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_again
+
+ - name: Copy state after second set_children
+ copy:
+     src: /tmp/ansible-xml-namespaced-beers-xml.xml
+     dest: /tmp/ansible-xml-namespaced-beers-2.xml
+     remote_src: yes
+
+ - name: Compare to expected result
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-1.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: yes
+ check_mode: yes
+ diff: yes
+ register: comparison
+ #command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml
+
+ - name: Test expected result
+ assert:
+ that:
+      - set_children_again.changed == false # idempotency
+      - comparison.changed == false # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml
new file mode 100644
index 0000000000..f77d7537e9
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml
@@ -0,0 +1,46 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Set namespaced '/bus:business/rat:rating' to '11'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_first_run
+
+ - name: Set namespaced '/bus:business/rat:rating' to '11' again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-element-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+ #command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml b/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml
new file mode 100644
index 0000000000..4620d984fa
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml
@@ -0,0 +1,81 @@
+---
+ - name: Copy expected results to remote
+ copy:
+ src: "results/{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - test-pretty-print.xml
+ - test-pretty-print-only.xml
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring (not using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: .
+ register: xmlresponse
+
+ - name: Compare to expected result
+ copy:
+ content: "{{ xmlresponse.xmlstring }}\n"
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring (using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ pretty_print: yes
+ register: xmlresponse
+
+ - name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse.xmlstring }}'
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # FIXME: This change is related to the newline added by pretty_print
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: /business/beers
+ pretty_print: yes
+ add_children:
+ - beer: Old Rasputin
+ register: xmlresponse_modification
+
+ - name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse_modification.xmlstring }}'
+ dest: '/tmp/test-pretty-print.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # FIXME: This change is related to the newline added by pretty_print
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse_modification.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
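+
+# Unlike the path-based tasks, xmlstring mode takes the document as a string
+# and returns the (possibly modified) document in the xmlstring result field,
+# which is why these comparisons copy xmlresponse.xmlstring rather than a file.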
diff --git a/test/integration/targets/incidental_xml/vars/main.yml b/test/integration/targets/incidental_xml/vars/main.yml
new file mode 100644
index 0000000000..7c5675bd93
--- /dev/null
+++ b/test/integration/targets/incidental_xml/vars/main.yml
@@ -0,0 +1,6 @@
+# -*- mode: yaml -*-
+---
+bad_beers:
+- beer: "Natty Lite"
+- beer: "Miller Lite"
+- beer: "Coors Lite"
diff --git a/test/integration/targets/incidental_zabbix_host/aliases b/test/integration/targets/incidental_zabbix_host/aliases
new file mode 100644
index 0000000000..f89752b833
--- /dev/null
+++ b/test/integration/targets/incidental_zabbix_host/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/freebsd
+skip/rhel
diff --git a/test/integration/targets/incidental_zabbix_host/defaults/main.yml b/test/integration/targets/incidental_zabbix_host/defaults/main.yml
new file mode 100644
index 0000000000..5482107368
--- /dev/null
+++ b/test/integration/targets/incidental_zabbix_host/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+
+zabbix_server_url: http://127.0.0.1/zabbix/
+zabbix_login_user: Admin
+zabbix_login_password: zabbix
diff --git a/test/integration/targets/incidental_zabbix_host/meta/main.yml b/test/integration/targets/incidental_zabbix_host/meta/main.yml
new file mode 100644
index 0000000000..df12e5e785
--- /dev/null
+++ b/test/integration/targets/incidental_zabbix_host/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_zabbix
diff --git a/test/integration/targets/incidental_zabbix_host/tasks/main.yml b/test/integration/targets/incidental_zabbix_host/tasks/main.yml
new file mode 100644
index 0000000000..914c1e5fcf
--- /dev/null
+++ b/test/integration/targets/incidental_zabbix_host/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+# setup stuff not testing zabbix_host
+- block:
+ - include: zabbix_host_setup.yml
+
+ # zabbix_host module tests
+ - include: zabbix_host_tests.yml
+
+ # documentation example tests
+ - include: zabbix_host_doc.yml
+
+ # tear down stuff set up earlier
+ - include: zabbix_host_teardown.yml
+
+ when:
+ - ansible_distribution == 'Ubuntu'
diff --git a/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_doc.yml b/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_doc.yml
new file mode 100644
index 0000000000..40f702bb45
--- /dev/null
+++ b/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_doc.yml
@@ -0,0 +1,83 @@
+---
+# These two tests are close to documentation example
+
+- name: Create a new host or update an existing host's info
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost1
+ visible_name: ExampleName
+ description: My ExampleHost Description
+ host_groups:
+ - Linux servers
+ - Zabbix servers
+ link_templates:
+ - Template App IMAP Service
+ - Template App NTP Service
+ status: enabled
+ state: present
+ inventory_mode: manual
+ inventory_zabbix:
+ tag: test-tag
+ alias: test-alias
+ notes: "Special Informations: test-info"
+ location: test-location
+ site_rack: test-rack
+ os: test-os
+ hardware: test-hw
+ ipmi_authtype: 2
+ ipmi_privilege: 4
+ ipmi_username: username
+ ipmi_password: password
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "10050"
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "12345"
+ macros:
+ - macro: '{$EXAMPLEMACRO}'
+ value: ExampleMacroValue
+ - macro: EXAMPLEMACRO2
+ value: ExampleMacroValue2
+      description: Example desc that works only with Zabbix 4.4 and higher
+ tags:
+ - tag: ExampleHostsTag
+ - tag: ExampleHostsTag2
+ value: ExampleTagValue
+ register: zabbix_host1
+
+- name: Update an existing host's tls settings
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost2
+ visible_name: ExampleName2
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.2
+ dns: ""
+ port: "10050"
+ host_groups:
+ - Linux servers
+ tls_psk_identity: test
+ tls_connect: 2
+ tls_psk: 123456789abcdef123456789abcdef12
+ register: zabbix_host2
+
+- name: expect both to succeed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+ - "zabbix_host2 is changed"
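+
+# For reference (per the Zabbix API conventions these numbers map to):
+# interface type 1 = Zabbix agent and type 4 = JMX; main: 1 marks the default
+# interface of that type, and useip: 1 makes Zabbix connect via ip, not dns.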
diff --git a/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_setup.yml b/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_setup.yml
new file mode 100644
index 0000000000..498a725f29
--- /dev/null
+++ b/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_setup.yml
@@ -0,0 +1,20 @@
+---
+# set up a zabbix proxy to test zabbix_host with
+
+- name: Create a new proxy
+ zabbix_proxy:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ proxy_name: ExampleProxy
+ description: ExampleProxy
+ status: active
+ state: present
+ interface:
+ type: 0
+ main: 1
+ useip: 1
+ ip: 10.5.6.7
+ dns: ""
+ port: 10050
+ register: zabbix_proxy
diff --git a/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_teardown.yml b/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_teardown.yml
new file mode 100644
index 0000000000..1d76c516c7
--- /dev/null
+++ b/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_teardown.yml
@@ -0,0 +1,10 @@
+---
+# remove zabbix_proxy (hopefully) created earlier
+
+- name: remove proxy
+ zabbix_proxy:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ proxy_name: ExampleProxy
+ state: absent
diff --git a/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_tests.yml b/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_tests.yml
new file mode 100644
index 0000000000..63be018988
--- /dev/null
+++ b/test/integration/targets/incidental_zabbix_host/tasks/zabbix_host_tests.yml
@@ -0,0 +1,1169 @@
+---
+
+- name: "test: create host with many options set"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ visible_name: ExampleName
+ description: My ExampleHost Description
+ host_groups:
+ - Linux servers
+ - Zabbix servers
+ link_templates:
+ - Template App IMAP Service
+ - Template App NTP Service
+ status: enabled
+ state: present
+ inventory_mode: manual
+ inventory_zabbix:
+ tag: test-tag
+ alias: test-alias
+ notes: "Special Informations: test-info"
+ location: test-location
+ site_rack: test-rack
+ os: test-os
+ hardware: test-hw
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "10050"
+ - type: 1
+ main: 0
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "{$MACRO}"
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "12345"
+ proxy: ExampleProxy
+ tls_psk_identity: test
+ tls_connect: 2
+ tls_psk: 123456789abcdef123456789abcdef12
+ macros:
+ - macro: MACRO1
+ value: test1
+ - macro: '{$MACRO2}'
+ value: test2
+ tags:
+ - tag: Tag1
+ - tag: Tag2
+ value: test2
+ register: zabbix_host1
+
+- name: expect to succeed and that things changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: try to create the same host with the same settings"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ visible_name: ExampleName
+ description: My ExampleHost Description
+ host_groups:
+ - Linux servers
+ - Zabbix servers
+ link_templates:
+ - Template App IMAP Service
+ - Template App NTP Service
+ status: enabled
+ state: present
+ inventory_mode: manual
+ inventory_zabbix:
+ tag: test-tag
+ alias: test-alias
+ notes: "Special Informations: test-info"
+ location: test-location
+ site_rack: test-rack
+ os: test-os
+ hardware: test-hw
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "10050"
+ - type: 1
+ main: 0
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "{$MACRO}"
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "12345"
+ proxy: ExampleProxy
+ tls_psk_identity: test
+ tls_connect: 2
+ tls_psk: 123456789abcdef123456789abcdef12
+ macros:
+ - macro: MACRO1
+ value: test1
+ - macro: '{$MACRO2}'
+ value: test2
+ tags:
+ - tag: Tag1
+ - tag: Tag2
+ value: test2
+ register: zabbix_host1
+
+- name: updating with same values should be idempotent
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: try to create the same host with the same settings and force false"
+ zabbix_host:
+ force: false
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ visible_name: ExampleName
+ description: My ExampleHost Description
+ host_groups:
+ - Linux servers
+ - Zabbix servers
+ link_templates:
+ - Template App IMAP Service
+ - Template App NTP Service
+ status: enabled
+ state: present
+ inventory_mode: manual
+ inventory_zabbix:
+ tag: test-tag
+ alias: test-alias
+ notes: "Special Informations: test-info"
+ location: test-location
+ site_rack: test-rack
+ os: test-os
+ hardware: test-hw
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "10050"
+ - type: 1
+ main: 0
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "{$MACRO}"
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "12345"
+ proxy: ExampleProxy
+ tls_psk_identity: test
+ tls_connect: 2
+ tls_psk: 123456789abcdef123456789abcdef12
+ register: zabbix_host1
+
+- name: updating with same values and force false should be idempotent
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: try to create the same host changing one parameter in the inventory with force false"
+ zabbix_host:
+ force: false
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ visible_name: ExampleName
+ description: My ExampleHost Description
+ host_groups:
+ - Linux servers
+ - Zabbix servers
+ link_templates:
+ - Template App IMAP Service
+ - Template App NTP Service
+ status: enabled
+ state: present
+ inventory_mode: manual
+ inventory_zabbix:
+ tag: test-tag
+ alias: test-alias
+ notes: "Special Informations: test-info"
+ location: test-location
+ site_rack: test-rack
+ os: test-os
+ hardware: test-hw-modified
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "10050"
+ - type: 1
+ main: 0
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "{$MACRO}"
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "12345"
+ proxy: ExampleProxy
+ tls_psk_identity: test
+ tls_connect: 2
+ tls_psk: 123456789abcdef123456789abcdef12
+ register: zabbix_host1
+
+- name: changing the value of an already defined inventory field should work and mark the task as changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change visible_name"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ visible_name: "ExampleName Changed"
+ register: zabbix_host1
+
+- name: expect to succeed and that things changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change visible_name (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ visible_name: "ExampleName Changed"
+ register: zabbix_host1
+
+- name: updating with same values should be idempotent
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: change description"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ description: "My ExampleHost Description Changed"
+ register: zabbix_host1
+
+- name: expect to succeed and that things changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change description (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ description: "My ExampleHost Description Changed"
+ register: zabbix_host1
+
+- name: updating with same values should be idempotent
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: change host groups (adding one group)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ host_groups:
+ - Linux servers
+ - Zabbix servers
+ - Virtual machines
+ register: zabbix_host1
+
+- name: expect to succeed and that things changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host groups (remove one group)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ host_groups:
+ - Linux servers
+ - Zabbix servers
+ register: zabbix_host1
+
+- name: expect to succeed and that things changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host groups (add one group using force=no)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ host_groups:
+ - Virtual machines
+ force: no
+ register: zabbix_host1
+
+- name: expect to succeed and that things changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+
+- name: "test: change host groups (check whether we are at three groups)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ host_groups:
+ - Linux servers
+ - Zabbix servers
+ - Virtual machines
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: change host groups (attempt to remove all host groups)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ host_groups:
+ -
+ register: zabbix_host1
+ ignore_errors: yes
+
+- name: expect to fail
+ assert:
+ that:
+ - "zabbix_host1 is failed"
+
+- name: "test: change host linked templates (same as before)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ link_templates:
+ - Template App IMAP Service
+ - Template App NTP Service
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: change host linked templates (add one template)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ link_templates:
+ - Template App IMAP Service
+ - Template App NTP Service
+ - Template App HTTP Service
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host linked templates (add one template, using force=no)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ link_templates:
+ - Template App LDAP Service
+ force: no
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host linked templates (make sure we are at 4 templates)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ link_templates:
+ - Template App IMAP Service
+ - Template App NTP Service
+ - Template App HTTP Service
+ - Template App LDAP Service
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: change host linked templates (remove all templates)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ link_templates:
+ -
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host linked templates (check we have no templates left)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ link_templates:
+ -
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: change host status"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ status: disabled
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host status (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ status: disabled
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: change host inventory mode"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ inventory_mode: automatic
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host inventory mode"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ inventory_mode: automatic
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: change host inventory data (one field)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ inventory_zabbix:
+ tag: test-tag-two
+ alias: test-alias
+ notes: "Special Informations: test-info"
+ location: test-location
+ site_rack: test-rack
+ os: test-os
+ hardware: test-hw
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host inventory data (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ inventory_zabbix:
+ tag: test-tag-two
+ alias: test-alias
+ notes: "Special Informations: test-info"
+ location: test-location
+ site_rack: test-rack
+ os: test-os
+ hardware: test-hw
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
+- name: "test: remove host proxy"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ proxy: ''
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add host proxy"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ proxy: ExampleProxy
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add host proxy (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ proxy: ExampleProxy
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
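+# Zabbix tls_connect and tls_accept are bitmasks: 1 = unencrypted, 2 = PSK,
+# 4 = certificate. Below, tls_connect: 4 connects using a certificate and
+# tls_accept: 7 accepts all three modes.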
+- name: "test: change tls settings"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ tls_psk_identity: test2
+ tls_connect: 4
+ tls_accept: 7
+ tls_psk: 123456789abcdef123456789abcdef13
+ tls_issuer: AcmeCorp
+ tls_subject: AcmeCorpServer
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change tls settings (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ tls_psk_identity: test2
+ tls_connect: 4
+ tls_accept: 7
+ tls_psk: 123456789abcdef123456789abcdef13
+ tls_issuer: AcmeCorp
+ tls_subject: AcmeCorpServer
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
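+# Zabbix interface type codes: 1 = agent, 2 = SNMP, 3 = IPMI, 4 = JMX.
+# main: 1 marks the default interface of its type and useip: 1 connects
+# by IP address rather than DNS name.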
+- name: "test: change interface settings (remove one)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "10050"
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change interface settings (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "10050"
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
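+# With force: no the module merges the given interfaces into the existing
+# set instead of replacing it, so adding a second interface below reports a
+# change while the full two-interface spec afterwards is idempotent.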
+- name: "test: change interface settings (add one interface using force=no)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ interfaces:
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "12345"
+ force: no
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change interface settings (verify that we are at two interfaces)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "10050"
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.1.1.1
+ dns: ""
+ port: "12345"
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
+
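+# IPMI codes: ipmi_authtype -1 = default, 2 = MD5; ipmi_privilege 2 = user,
+# 4 = admin.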
+- name: "test: add IPMI settings"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ ipmi_authtype: 2
+ ipmi_privilege: 4
+ ipmi_username: username
+ ipmi_password: password
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add IPMI settings again"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ ipmi_authtype: 2
+ ipmi_privilege: 4
+ ipmi_username: username
+ ipmi_password: password
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: verify that an empty change is idempotent"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: IPMI set default values"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ ipmi_authtype: -1
+ ipmi_privilege: 2
+ ipmi_username: ""
+ ipmi_password: ""
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: IPMI set default values (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ ipmi_authtype: -1
+ ipmi_privilege: 2
+ ipmi_username: ""
+ ipmi_password: ""
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: change host inventory mode to disabled"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ inventory_mode: disabled
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: change host inventory mode to manual"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ inventory_mode: manual
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
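+# Macro names are case-insensitive and bare names such as NEWMACRO2 are
+# normalized to the {$NAME} form, which is why the lowercase re-run below
+# must report no change.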
+- name: "test: add new set of user macros to the host"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros:
+ - macro: '{$NEWMACRO1}'
+ value: test123
+ - macro: NEWMACRO2
+ value: abc
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add new set of user macros to the host (again - lowercase)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros:
+ - macro: '{$newmacro1}'
+ value: test123
+ - macro: newmacro2
+ value: abc
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: update one of the user macros present on the host"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros:
+ - macro: '{$NEWMACRO1}'
+ value: test1234
+ - macro: NEWMACRO2
+ value: abc
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: update one of the user macros with description"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros:
+ - macro: '{$NEWMACRO1}'
+ value: test1234
+ description: Example Description
+ - macro: NEWMACRO2
+ value: abc
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: update one of the user macros with description (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros:
+ - macro: '{$NEWMACRO1}'
+ value: test1234
+ description: Example Description
+ - macro: NEWMACRO2
+ value: abc
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: update one of the user macros by removing description"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros:
+ - macro: '{$NEWMACRO1}'
+ value: test1234
+ - macro: NEWMACRO2
+ value: abc
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add user macro while keeping previous ones with force=no"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ force: no
+ macros:
+ - macro: '{$NEWMACRO3}'
+ value: testing
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add the same user macros (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros:
+ - macro: '{$NEWMACRO1}'
+ value: test1234
+ - macro: NEWMACRO2
+ value: abc
+ - macro: '{$NEWMACRO3}'
+ value: testing
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: wipe out all of the user macros"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros: []
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: wipe out all of the user macros (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ macros: []
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: add new set of tags to the host"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ tags:
+ - tag: NEWTAG1
+ - tag: NewTag2
+ value: abc
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add new set of tags to the host (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ tags:
+ - tag: NEWTAG1
+ - tag: NewTag2
+ value: abc
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: update one of the tags present on the host"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ tags:
+ - tag: NEWTAG1
+ - tag: NewTag2
+ value: abcd
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add tag while keeping previous ones with force=no"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ force: no
+ tags:
+ - tag: newtag3
+ value: testing
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: add the same tags (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ tags:
+ - tag: NEWTAG1
+ - tag: NewTag2
+ value: abcd
+ - tag: newtag3
+ value: testing
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: wipe out all of the tags"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ tags: []
+ register: zabbix_host1
+
+- name: expect to succeed and that things have changed
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: wipe out all of the tags (again)"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ tags: []
+ register: zabbix_host1
+
+- name: expect to succeed and that things have not changed
+ assert:
+ that:
+ - "zabbix_host1 is not changed"
+
+- name: "test: attempt to delete host created earlier"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ state: absent
+ register: zabbix_host1
+
+- name: deleting a host is a change, right?
+ assert:
+ that:
+ - "zabbix_host1 is changed"
+
+- name: "test: attempt deleting a non-existant host"
+ zabbix_host:
+ server_url: "{{ zabbix_server_url }}"
+ login_user: "{{ zabbix_login_user }}"
+ login_password: "{{ zabbix_login_password }}"
+ host_name: ExampleHost
+ state: absent
+ register: zabbix_host1
+
+- name: deleting a non-existent host is not a change, right?
+ assert:
+ that:
+ - "not zabbix_host1 is changed"
diff --git a/test/lib/ansible_test/_internal/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
index 1238d12004..a72e6669bf 100644
--- a/test/lib/ansible_test/_internal/sanity/integration_aliases.py
+++ b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
@@ -221,6 +221,7 @@ class IntegrationAliasesTest(SanityVersionNeutral):
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/'], include=False, directories=False, errors=False)),
find=self.format_shippable_group_alias('linux').replace('linux', 'posix'),
+ find_incidental=['shippable/posix/incidental/'],
)
for cloud in clouds:
diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt
index 406622b3be..d6e30775b1 100644
--- a/test/sanity/ignore.txt
+++ b/test/sanity/ignore.txt
@@ -8225,12 +8225,21 @@ test/support/integration/plugins/module_utils/common/network.py metaclass-boiler
test/support/integration/plugins/module_utils/compat/ipaddress.py future-import-boilerplate
test/support/integration/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate
test/support/integration/plugins/module_utils/compat/ipaddress.py no-unicode-literals
+test/support/integration/plugins/module_utils/database.py future-import-boilerplate
+test/support/integration/plugins/module_utils/database.py metaclass-boilerplate
test/support/integration/plugins/module_utils/k8s/common.py metaclass-boilerplate
test/support/integration/plugins/module_utils/k8s/raw.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/mysql.py future-import-boilerplate
+test/support/integration/plugins/module_utils/mysql.py metaclass-boilerplate
test/support/integration/plugins/module_utils/net_tools/nios/api.py future-import-boilerplate
test/support/integration/plugins/module_utils/net_tools/nios/api.py metaclass-boilerplate
test/support/integration/plugins/module_utils/network/common/utils.py future-import-boilerplate
test/support/integration/plugins/module_utils/network/common/utils.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/postgres.py future-import-boilerplate
+test/support/integration/plugins/module_utils/postgres.py metaclass-boilerplate
+test/support/integration/plugins/modules/lvg.py pylint:blacklisted-name
+test/support/integration/plugins/modules/synchronize.py pylint:blacklisted-name
+test/support/integration/plugins/modules/timezone.py pylint:blacklisted-name
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py future-import-boilerplate
diff --git a/test/support/integration/plugins/action/assemble.py b/test/support/integration/plugins/action/assemble.py
new file mode 100644
index 0000000000..d874c7090b
--- /dev/null
+++ b/test/support/integration/plugins/action/assemble.py
@@ -0,0 +1,165 @@
+# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com>
+# Stephen Fromm <sfromm@gmail.com>
+# Brian Coca <briancoca+dev@gmail.com>
+# Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import codecs
+import os
+import os.path
+import re
+import tempfile
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum_s
+
+
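+# A minimal usage sketch (illustration only, not part of this plugin; the
+# paths are hypothetical). Fragments are concatenated on the controller,
+# then delivered through the copy module:
+#
+#   - name: build a config from fragments
+#     assemble:
+#       src: files/conf.d
+#       dest: /etc/example/service.conf
+#       remote_src: no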
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
+ ''' assemble a file from a directory of fragments '''
+
+ tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ tmp = os.fdopen(tmpfd, 'wb')
+ delimit_me = False
+ add_newline = False
+
+ for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
+ if compiled_regexp and not compiled_regexp.search(f):
+ continue
+ fragment = u"%s/%s" % (src_path, f)
+ if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
+ continue
+
+ with open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb') as fragment_fh:
+ fragment_content = fragment_fh.read()
+
+ # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline:
+ tmp.write(b'\n')
+
+ # delimiters should only appear between fragments
+ if delimit_me:
+ if delimiter:
+ # un-escape anything like newlines
+ delimiter = codecs.escape_decode(delimiter)[0]
+ tmp.write(delimiter)
+ # always make sure there's a newline after the
+ # delimiter, so lines don't run together
+ if delimiter[-1:] != b'\n':  # slice, not index: indexing bytes yields an int on Python 3
+ tmp.write(b'\n')
+
+ tmp.write(fragment_content)
+ delimit_me = True
+ if fragment_content.endswith(b'\n'):
+ add_newline = False
+ else:
+ add_newline = True
+
+ tmp.close()
+ return temp_path
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = False
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if task_vars is None:
+ task_vars = dict()
+
+ src = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ delimiter = self._task.args.get('delimiter', None)
+ remote_src = self._task.args.get('remote_src', 'yes')
+ regexp = self._task.args.get('regexp', None)
+ follow = self._task.args.get('follow', False)
+ ignore_hidden = self._task.args.get('ignore_hidden', False)
+ decrypt = self._task.args.get('decrypt', True)
+
+ try:
+ if src is None or dest is None:
+ raise AnsibleActionFail("src and dest are required")
+
+ if boolean(remote_src, strict=False):
+ result.update(self._execute_module(module_name='assemble', task_vars=task_vars))
+ raise _AnsibleActionDone()
+ else:
+ try:
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ if not os.path.isdir(src):
+ raise AnsibleActionFail(u"Source (%s) is not a directory" % src)
+
+ _re = None
+ if regexp is not None:
+ _re = re.compile(regexp)
+
+ # Does all work assembling the file
+ path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)
+
+ path_checksum = checksum_s(path)
+ dest = self._remote_expand_user(dest)
+ dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow)
+
+ diff = {}
+
+ # setup args for running modules
+ new_module_args = self._task.args.copy()
+
+ # clean assemble specific options
+ for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
+ if opt in new_module_args:
+ del new_module_args[opt]
+ new_module_args['dest'] = dest
+
+ if path_checksum != dest_stat['checksum']:
+
+ if self._play_context.diff:
+ diff = self._get_diff_data(dest, path, task_vars)
+
+ remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src')
+ xfered = self._transfer_file(path, remote_path)
+
+ # fix file permissions when the copy is done as a different user
+ self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
+
+ new_module_args.update(dict(src=xfered,))
+
+ res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars)
+ if diff:
+ res['diff'] = diff
+ result.update(res)
+ else:
+ result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars))
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/test/support/integration/plugins/connection/chroot.py b/test/support/integration/plugins/connection/chroot.py
new file mode 100644
index 0000000000..d95497b42b
--- /dev/null
+++ b/test/support/integration/plugins/connection/chroot.py
@@ -0,0 +1,208 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ author: Maykel Moya <mmoya@speedyrails.com>
+ connection: chroot
+ short_description: Interact with local chroot
+ description:
+ - Run commands or put/fetch files to an existing chroot on the Ansible controller.
+ version_added: "1.1"
+ options:
+ remote_addr:
+ description:
+ - The path of the chroot you want to access.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ executable:
+ description:
+ - User-specified executable shell
+ ini:
+ - section: defaults
+ key: executable
+ env:
+ - name: ANSIBLE_EXECUTABLE
+ vars:
+ - name: ansible_executable
+ default: /bin/sh
+ chroot_exe:
+ version_added: '2.8'
+ description:
+ - User-specified chroot binary
+ ini:
+ - section: chroot_connection
+ key: exe
+ env:
+ - name: ANSIBLE_CHROOT_EXE
+ vars:
+ - name: ansible_chroot_exe
+ default: chroot
+"""
+
+import os
+import os.path
+import subprocess
+import traceback
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.basic import is_executable
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local chroot based connections '''
+
+ transport = 'chroot'
+ has_pipelining = True
+ # su currently has an undiagnosed issue with calculating the file
+ # checksums (so copy, for instance, doesn't work right)
+ # Have to look into that before re-enabling this
+ has_tty = False
+
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.chroot = self._play_context.remote_addr
+
+ if os.geteuid() != 0:
+ raise AnsibleError("chroot connection requires running as root")
+
+ # we're running as root on the local system so do some
+ # trivial checks for ensuring 'host' is actually a chroot'able dir
+ if not os.path.isdir(self.chroot):
+ raise AnsibleError("%s is not a directory" % self.chroot)
+
+ chrootsh = os.path.join(self.chroot, 'bin/sh')
+ # We want to check for a usable Bourne shell inside the chroot.
+ # is_executable() == True is sufficient. For symlinks this gets
+ # complicated quickly, so we punt on resolving them: as long as
+ # the path is a symlink we assume it will work.
+ if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
+ raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
+
+ def _connect(self):
+ ''' connect to the chroot '''
+ if os.path.isabs(self.get_option('chroot_exe')):
+ self.chroot_cmd = self.get_option('chroot_exe')
+ else:
+ try:
+ self.chroot_cmd = get_bin_path(self.get_option('chroot_exe'))
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ ''' run a command on the chroot. This is only needed for implementing
+ put_file() get_file() so that we don't have to read the whole file
+ into memory.
+
+ Compared to exec_command(), it loses some niceties, such as being able to
+ return the process's exit code immediately.
+ '''
+ executable = self.get_option('executable')
+ local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
+
+ display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the chroot '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
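+ # e.g. 'etc/motd' becomes '/etc/motd'; '/var/../etc/motd' normalizes to '/etc/motd'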
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to chroot '''
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+ raise AnsibleError("chroot connection requires dd command in the chroot")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from chroot to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("chroot connection requires dd command in the chroot")
+
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
diff --git a/test/support/integration/plugins/lookup/hashi_vault.py b/test/support/integration/plugins/lookup/hashi_vault.py
new file mode 100644
index 0000000000..b90fe586ca
--- /dev/null
+++ b/test/support/integration/plugins/lookup/hashi_vault.py
@@ -0,0 +1,302 @@
+# (c) 2015, Jonathan Davila <jonathan(at)davila.io>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: hashi_vault
+ author: Jonathan Davila <jdavila(at)ansible.com>
+ version_added: "2.0"
+ short_description: retrieve secrets from HashiCorp's vault
+ requirements:
+ - hvac (python library)
+ description:
+ - retrieve secrets from HashiCorp's vault
+ notes:
+ - Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified.
+ - As of Ansible 2.10, only the latest secret is returned when specifying a KV v2 path.
+ options:
+ secret:
+ description: query you are making.
+ required: True
+ token:
+ description: vault token.
+ env:
+ - name: VAULT_TOKEN
+ url:
+ description: URL to vault service.
+ env:
+ - name: VAULT_ADDR
+ default: 'http://127.0.0.1:8200'
+ username:
+ description: Authentication user name.
+ password:
+ description: Authentication password.
+ role_id:
+ description: Role id for a vault AppRole auth.
+ env:
+ - name: VAULT_ROLE_ID
+ secret_id:
+ description: Secret id for a vault AppRole auth.
+ env:
+ - name: VAULT_SECRET_ID
+ auth_method:
+ description:
+ - Authentication method to be used.
+ - C(userpass) is added in version 2.8.
+ env:
+ - name: VAULT_AUTH_METHOD
+ choices:
+ - userpass
+ - ldap
+ - approle
+ mount_point:
+ description: vault mount point, only required if you have a custom mount point.
+ default: ldap
+ ca_cert:
+ description: path to certificate to use for authentication.
+ aliases: [ cacert ]
+ validate_certs:
+ description: controls verification and validation of SSL certificates; typically you only want to turn this off when using self-signed certificates.
+ type: boolean
+ default: True
+ namespace:
+ version_added: "2.8"
+ description: namespace where secrets reside. requires HVAC 0.7.0+ and Vault 0.11+.
+"""
+
+EXAMPLES = """
+- debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}"
+
+- name: Return all secrets from a path
+ debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}"
+
+- name: Vault that requires authentication via LDAP
+ debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas url=http://myvault:8200')}}"
+
+- name: Vault that requires authentication via username and password
+ debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=userpass username=myuser password=mypas url=http://myvault:8200')}}"
+
+- name: Using an ssl vault
+ debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=https://myvault:8200 validate_certs=False')}}"
+
+- name: using certificate auth
+ debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/hi:value token=xxxx-xxx-xxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem')}}"
+
+- name: authenticate with a Vault app role
+ debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=approle role_id=myroleid secret_id=mysecretid url=http://myvault:8200')}}"
+
+- name: Return all secrets from a path in a namespace
+ debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200 namespace=teama/admins')}}"
+
+# When using KV v2 the PATH should include "data" between the secret engine mount and path (e.g. "secret/data/:path")
+# see: https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
+- name: Return latest KV v2 secret from path
+ debug:
+ msg: "{{ lookup('hashi_vault', 'secret=secret/data/hello token=my_vault_token url=http://myvault_url:8200') }}"
+
+
+"""
+
+RETURN = """
+_raw:
+ description:
+ - secret(s) requested
+"""
+
+import os
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.lookup import LookupBase
+
+HAS_HVAC = False
+try:
+ import hvac
+ HAS_HVAC = True
+except ImportError:
+ HAS_HVAC = False
+
+
+ANSIBLE_HASHI_VAULT_ADDR = 'http://127.0.0.1:8200'
+
+if os.getenv('VAULT_ADDR') is not None:
+ ANSIBLE_HASHI_VAULT_ADDR = os.environ['VAULT_ADDR']
+
+
+class HashiVault:
+ def __init__(self, **kwargs):
+
+ self.url = kwargs.get('url', ANSIBLE_HASHI_VAULT_ADDR)
+ self.namespace = kwargs.get('namespace', None)
+ self.avail_auth_method = ['approle', 'userpass', 'ldap']
+
+ # split secret arg, which has format 'secret/hello:value' into secret='secret/hello' and secret_field='value'
+ s = kwargs.get('secret')
+ if s is None:
+ raise AnsibleError("No secret specified for hashi_vault lookup")
+
+ s_f = s.rsplit(':', 1)
+ self.secret = s_f[0]
+ if len(s_f) >= 2:
+ self.secret_field = s_f[1]
+ else:
+ self.secret_field = ''
+
+ self.verify = self.boolean_or_cacert(kwargs.get('validate_certs', True), kwargs.get('cacert', ''))
+
+ # If a particular backend is asked for (and its method exists) we call it, otherwise drop through to using
+ # token auth. This means if a particular auth backend is requested and a token is also given, then we
+ # ignore the token and attempt authentication against the specified backend.
+ #
+ # to enable a new auth backend, simply add a new 'def auth_<type>' method below.
+ #
+ self.auth_method = kwargs.get('auth_method', os.environ.get('VAULT_AUTH_METHOD'))
+ if self.auth_method and self.auth_method != 'token':
+ try:
+ if self.namespace is not None:
+ self.client = hvac.Client(url=self.url, verify=self.verify, namespace=self.namespace)
+ else:
+ self.client = hvac.Client(url=self.url, verify=self.verify)
+ # prefixing with auth_ to limit which methods can be accessed
+ getattr(self, 'auth_' + self.auth_method)(**kwargs)
+ except AttributeError:
+ raise AnsibleError("Authentication method '%s' not supported."
+ " Available options are %r" % (self.auth_method, self.avail_auth_method))
+ else:
+ self.token = kwargs.get('token', os.environ.get('VAULT_TOKEN', None))
+ if self.token is None and os.environ.get('HOME'):
+ token_filename = os.path.join(
+ os.environ.get('HOME'),
+ '.vault-token'
+ )
+ if os.path.exists(token_filename):
+ with open(token_filename) as token_file:
+ self.token = token_file.read().strip()
+
+ if self.token is None:
+ raise AnsibleError("No Vault Token specified")
+
+ if self.namespace is not None:
+ self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify, namespace=self.namespace)
+ else:
+ self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify)
+
+ if not self.client.is_authenticated():
+ raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup")
+
+ def get(self):
+ data = self.client.read(self.secret)
+
+ # Check response for KV v2 fields and flatten nested secret data.
+ #
+ # https://vaultproject.io/api/secret/kv/kv-v2.html#sample-response-1
+ try:
+ # sentinel field checks
+ check_dd = data['data']['data']
+ check_md = data['data']['metadata']
+ # unwrap nested data
+ data = data['data']
+ except KeyError:
+ pass
+
+ if data is None:
+ raise AnsibleError("The secret %s doesn't seem to exist for hashi_vault lookup" % self.secret)
+
+ if self.secret_field == '':
+ return data['data']
+
+ if self.secret_field not in data['data']:
+ raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (self.secret, self.secret_field))
+
+ return data['data'][self.secret_field]
+
+ def check_params(self, **kwargs):
+ username = kwargs.get('username')
+ if username is None:
+ raise AnsibleError("Authentication method %s requires a username" % self.auth_method)
+
+ password = kwargs.get('password')
+ if password is None:
+ raise AnsibleError("Authentication method %s requires a password" % self.auth_method)
+
+ mount_point = kwargs.get('mount_point')
+
+ return username, password, mount_point
+
+ def auth_userpass(self, **kwargs):
+ username, password, mount_point = self.check_params(**kwargs)
+ if mount_point is None:
+ mount_point = 'userpass'
+
+ self.client.auth_userpass(username, password, mount_point=mount_point)
+
+ def auth_ldap(self, **kwargs):
+ username, password, mount_point = self.check_params(**kwargs)
+ if mount_point is None:
+ mount_point = 'ldap'
+
+ self.client.auth.ldap.login(username, password, mount_point=mount_point)
+
+ def boolean_or_cacert(self, validate_certs, cacert):
+ ''' return a bool or cacert '''
+ validate_certs = boolean(validate_certs, strict=False)
+ if validate_certs is True:
+ if cacert != '':
+ return cacert
+ else:
+ return True
+ else:
+ return False
+
+ def auth_approle(self, **kwargs):
+ role_id = kwargs.get('role_id', os.environ.get('VAULT_ROLE_ID', None))
+ if role_id is None:
+ raise AnsibleError("Authentication method app role requires a role_id")
+
+ secret_id = kwargs.get('secret_id', os.environ.get('VAULT_SECRET_ID', None))
+ if secret_id is None:
+ raise AnsibleError("Authentication method app role requires a secret_id")
+
+ self.client.auth_approle(role_id, secret_id)
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ if not HAS_HVAC:
+ raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.")
+
+ vault_args = terms[0].split()
+ vault_dict = {}
+ ret = []
+
+ for param in vault_args:
+ try:
+ key, value = param.split('=')
+ except ValueError:
+ raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % terms)
+ vault_dict[key] = value
+
+ if 'ca_cert' in vault_dict.keys():
+ vault_dict['cacert'] = vault_dict['ca_cert']
+ vault_dict.pop('ca_cert', None)
+
+ vault_conn = HashiVault(**vault_dict)
+
+ for term in terms:
+ key = term.split()[0]
+ value = vault_conn.get()
+ ret.append(value)
+
+ return ret
diff --git a/test/support/integration/plugins/lookup/rabbitmq.py b/test/support/integration/plugins/lookup/rabbitmq.py
new file mode 100644
index 0000000000..7c2745f41d
--- /dev/null
+++ b/test/support/integration/plugins/lookup/rabbitmq.py
@@ -0,0 +1,190 @@
+# (c) 2018, John Imison <john+github@imison.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: rabbitmq
+ author: John Imison <@Im0>
+ version_added: "2.8"
+ short_description: Retrieve messages from an AMQP/AMQPS RabbitMQ queue.
+ description:
+ - This lookup uses a basic get to retrieve all messages, or a limited number C(count) of messages, from a RabbitMQ queue.
+ options:
+ url:
+ description:
+ - A URI connection string to connect to the AMQP/AMQPS RabbitMQ server.
+ - For more information refer to the URI spec U(https://www.rabbitmq.com/uri-spec.html).
+ required: True
+ queue:
+ description:
+ - The queue to get messages from.
+ required: True
+ count:
+ description:
+ - How many messages to collect from the queue.
+ - If not set, defaults to retrieving all the messages from the queue.
+ requirements:
+ - The python pika package U(https://pypi.org/project/pika/).
+ notes:
+ - This lookup implements BlockingChannel.basic_get to get messages from a RabbitMQ server.
+ - After retrieving a message from the server, receipt of the message is acknowledged and the message on the server is deleted.
+ - Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries to stay fairly independent of the underlying network support library.
+ - More information about pika can be found at U(https://pika.readthedocs.io/en/stable/).
+ - This plugin is tested against RabbitMQ. Other AMQP 0-9-1 protocol based servers may work, but are not tested or guaranteed.
+ - Assigning the returned messages to a variable under C(vars) may produce unexpected results, as the lookup is evaluated every time the
+ variable is referenced.
+ - Currently this plugin only handles text-based messages from a queue. Unexpected results may occur when retrieving binary data.
+"""
+
+
+EXAMPLES = """
+- name: Get all messages off a queue
+ debug:
+ msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello') }}"
+
+
+# If you are intending on using the returned messages as a variable in more than
+# one task (eg. debug, template), it is recommended to set_fact.
+
+- name: Get 2 messages off a queue and set a fact for re-use
+ set_fact:
+ messages: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello', count=2) }}"
+
+- name: Dump out contents of the messages
+ debug:
+ var: messages
+
+"""
+
+RETURN = """
+ _list:
+ description:
+ - A list of dictionaries with keys and value from the queue.
+ type: list
+ contains:
+ content_type:
+ description: The content_type on the message in the queue.
+ type: str
+ delivery_mode:
+ description: The delivery_mode on the message in the queue.
+ type: str
+ delivery_tag:
+ description: The delivery_tag on the message in the queue.
+ type: str
+ exchange:
+ description: The exchange the message came from.
+ type: str
+ message_count:
+ description: The message_count for the message on the queue.
+ type: str
+ msg:
+ description: The content of the message.
+ type: str
+ redelivered:
+ description: The redelivered flag. True if the message has been delivered before.
+ type: bool
+ routing_key:
+ description: The routing_key on the message in the queue.
+ type: str
+ headers:
+ description: The headers for the message returned from the queue.
+ type: dict
+ json:
+ description: If application/json is specified in content_type, json will be loaded into variables.
+ type: dict
+
+"""
+
+import json
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+
+try:
+ import pika
+ from pika import spec
+ HAS_PIKA = True
+except ImportError:
+ HAS_PIKA = False
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, url=None, queue=None, count=None):
+ if not HAS_PIKA:
+ raise AnsibleError('pika python package is required for rabbitmq lookup.')
+ if not url:
+ raise AnsibleError('URL is required for rabbitmq lookup.')
+ if not queue:
+ raise AnsibleError('Queue is required for rabbitmq lookup.')
+
+ display.vvv(u"terms:%s : variables:%s url:%s queue:%s count:%s" % (terms, variables, url, queue, count))
+
+ try:
+ parameters = pika.URLParameters(url)
+ except Exception as e:
+ raise AnsibleError("URL malformed: %s" % to_native(e))
+
+ try:
+ connection = pika.BlockingConnection(parameters)
+ except Exception as e:
+ raise AnsibleError("Connection issue: %s" % to_native(e))
+
+ try:
+ conn_channel = connection.channel()
+ except pika.exceptions.AMQPChannelError as e:
+ try:
+ connection.close()
+ except pika.exceptions.AMQPConnectionError as ie:
+ raise AnsibleError("Channel and connection closing issues: %s / %s" % to_native(e), to_native(ie))
+ raise AnsibleError("Channel issue: %s" % to_native(e))
+
+ ret = []
+ idx = 0
+
+ while True:
+ method_frame, properties, body = conn_channel.basic_get(queue=queue)
+ if method_frame:
+ display.vvv(u"%s, %s, %s " % (method_frame, properties, to_text(body)))
+
+ # TODO: In the future consider checking content_type and handle text/binary data differently.
+ msg_details = dict({
+ 'msg': to_text(body),
+ 'message_count': method_frame.message_count,
+ 'routing_key': method_frame.routing_key,
+ 'delivery_tag': method_frame.delivery_tag,
+ 'redelivered': method_frame.redelivered,
+ 'exchange': method_frame.exchange,
+ 'delivery_mode': properties.delivery_mode,
+ 'content_type': properties.content_type,
+ 'headers': properties.headers
+ })
+ if properties.content_type == 'application/json':
+ try:
+ msg_details['json'] = json.loads(msg_details['msg'])
+ except ValueError as e:
+ raise AnsibleError("Unable to decode JSON for message %s: %s" % (method_frame.delivery_tag, to_native(e)))
+
+ ret.append(msg_details)
+ conn_channel.basic_ack(method_frame.delivery_tag)
+ idx += 1
+ if method_frame.message_count == 0 or idx == count:
+ break
+ # If we didn't get a method_frame, exit.
+ else:
+ break
+
+ if connection.is_closed:
+ return [ret]
+ else:
+ try:
+ connection.close()
+ except pika.exceptions.AMQPConnectionError:
+ pass
+ return [ret]
diff --git a/test/support/integration/plugins/module_utils/crypto.py b/test/support/integration/plugins/module_utils/crypto.py
new file mode 100644
index 0000000000..e67eeff1b4
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/crypto.py
@@ -0,0 +1,2125 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ----------------------------------------------------------------------
+# A clearly marked portion of this file is licensed under the BSD license
+# Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
+# Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
+# For more details, search for the function _obj2txt().
+# ---------------------------------------------------------------------
+# A clearly marked portion of this file is extracted from a project that
+# is licensed under the Apache License 2.0
+# Copyright (c) the OpenSSL contributors
+# For more details, search for the function _OID_MAP.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import sys
+from distutils.version import LooseVersion
+
+try:
+ import OpenSSL
+ from OpenSSL import crypto
+except ImportError:
+ # An error will be raised in the calling class to let the end
+ # user know that OpenSSL couldn't be found.
+ pass
+
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend as cryptography_backend
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives import serialization
+ import ipaddress
+
+ # Older versions of cryptography (< 2.1) do not have __hash__ functions for
+ # general name objects (DNSName, IPAddress, ...), while providing overloaded
+ # equality and string representation operations. This makes it impossible to
+ # use them in hash-based data structures such as set or dict. Since we are
+ # actually doing that in openssl_certificate, and potentially in other code,
+ # we need to monkey-patch __hash__ for these classes to make sure our code
+ # works fine.
+ if LooseVersion(cryptography.__version__) < LooseVersion('2.1'):
+ # A very simple hash function which relies on the representation
+ # of an object to be implemented. This is the case since at least
+ # cryptography 1.0, see
+ # https://github.com/pyca/cryptography/commit/7a9abce4bff36c05d26d8d2680303a6f64a0e84f
+ def simple_hash(self):
+ return hash(repr(self))
+
+ # The hash functions for the following types were added for cryptography 2.1:
+ # https://github.com/pyca/cryptography/commit/fbfc36da2a4769045f2373b004ddf0aff906cf38
+ x509.DNSName.__hash__ = simple_hash
+ x509.DirectoryName.__hash__ = simple_hash
+ x509.GeneralName.__hash__ = simple_hash
+ x509.IPAddress.__hash__ = simple_hash
+ x509.OtherName.__hash__ = simple_hash
+ x509.RegisteredID.__hash__ = simple_hash
+
+ if LooseVersion(cryptography.__version__) < LooseVersion('1.2'):
+ # The hash functions for the following types were added for cryptography 1.2:
+ # https://github.com/pyca/cryptography/commit/b642deed88a8696e5f01ce6855ccf89985fc35d0
+ # https://github.com/pyca/cryptography/commit/d1b5681f6db2bde7a14625538bd7907b08dfb486
+ x509.RFC822Name.__hash__ = simple_hash
+ x509.UniformResourceIdentifier.__hash__ = simple_hash
+
+ # Test whether we have support for X25519, X448, Ed25519 and/or Ed448
+ try:
+ import cryptography.hazmat.primitives.asymmetric.x25519
+ CRYPTOGRAPHY_HAS_X25519 = True
+ try:
+ cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.private_bytes
+ CRYPTOGRAPHY_HAS_X25519_FULL = True
+ except AttributeError:
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ except ImportError:
+ CRYPTOGRAPHY_HAS_X25519 = False
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.x448
+ CRYPTOGRAPHY_HAS_X448 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_X448 = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.ed25519
+ CRYPTOGRAPHY_HAS_ED25519 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_ED25519 = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.ed448
+ CRYPTOGRAPHY_HAS_ED448 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_ED448 = False
+
+ HAS_CRYPTOGRAPHY = True
+except ImportError:
+ # Error handled in the calling module.
+ CRYPTOGRAPHY_HAS_X25519 = False
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ CRYPTOGRAPHY_HAS_X448 = False
+ CRYPTOGRAPHY_HAS_ED25519 = False
+ CRYPTOGRAPHY_HAS_ED448 = False
+ HAS_CRYPTOGRAPHY = False
+
+
+import abc
+import base64
+import binascii
+import datetime
+import errno
+import hashlib
+import os
+import re
+import tempfile
+
+from ansible.module_utils import six
+from ansible.module_utils._text import to_native, to_bytes, to_text
+
+
+class OpenSSLObjectError(Exception):
+ pass
+
+
+class OpenSSLBadPassphraseError(OpenSSLObjectError):
+ pass
+
+
+def get_fingerprint_of_bytes(source):
+ """Generate the fingerprint of the given bytes."""
+
+ fingerprint = {}
+
+ try:
+ algorithms = hashlib.algorithms
+ except AttributeError:
+ try:
+ algorithms = hashlib.algorithms_guaranteed
+ except AttributeError:
+ return None
+
+ for algo in algorithms:
+ f = getattr(hashlib, algo)
+ try:
+ h = f(source)
+ except ValueError:
+ # This can happen for hash algorithms not supported in FIPS mode
+ # (https://github.com/ansible/ansible/issues/67213)
+ continue
+ try:
+ # Certain hash functions have a hexdigest() which expects a length parameter
+ pubkey_digest = h.hexdigest()
+ except TypeError:
+ pubkey_digest = h.hexdigest(32)
+ fingerprint[algo] = ':'.join(pubkey_digest[i:i + 2] for i in range(0, len(pubkey_digest), 2))
+
+ return fingerprint
+
+
+def get_fingerprint(path, passphrase=None, content=None, backend='pyopenssl'):
+ """Generate the fingerprint of the public key. """
+
+ privatekey = load_privatekey(path, passphrase=passphrase, content=content, check_passphrase=False, backend=backend)
+
+ if backend == 'pyopenssl':
+ try:
+ publickey = crypto.dump_publickey(crypto.FILETYPE_ASN1, privatekey)
+ except AttributeError:
+ # If PyOpenSSL < 16.0 crypto.dump_publickey() will fail.
+ try:
+ bio = crypto._new_mem_buf()
+ rc = crypto._lib.i2d_PUBKEY_bio(bio, privatekey._pkey)
+ if rc != 1:
+ crypto._raise_current_error()
+ publickey = crypto._bio_to_string(bio)
+ except AttributeError:
+ # By doing this we prevent the code from raising an error
+ # yet we return no value in the fingerprint hash.
+ return None
+ elif backend == 'cryptography':
+ publickey = privatekey.public_key().public_bytes(
+ serialization.Encoding.DER,
+ serialization.PublicFormat.SubjectPublicKeyInfo
+ )
+
+ return get_fingerprint_of_bytes(publickey)
+
+
+def load_file_if_exists(path, module=None, ignore_errors=False):
+ try:
+ with open(path, 'rb') as f:
+ return f.read()
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ return None
+ if ignore_errors:
+ return None
+ if module is None:
+ raise
+ module.fail_json(msg='Error while loading {0} - {1}'.format(path, str(exc)))
+ except Exception as exc:
+ if ignore_errors:
+ return None
+ if module is None:
+ raise
+ module.fail_json(msg='Error while loading {0} - {1}'.format(path, str(exc)))
+
+
+def load_privatekey(path, passphrase=None, check_passphrase=True, content=None, backend='pyopenssl'):
+ """Load the specified OpenSSL private key.
+
+ The content can also be specified via content; in that case,
+ this function will not load the key from disk.
+ """
+
+ try:
+ if content is None:
+ with open(path, 'rb') as b_priv_key_fh:
+ priv_key_detail = b_priv_key_fh.read()
+ else:
+ priv_key_detail = content
+
+ if backend == 'pyopenssl':
+
+ # First try: try to load with real passphrase (resp. empty string)
+ # Will work if this is the correct passphrase, or the key is not
+ # password-protected.
+ try:
+ result = crypto.load_privatekey(crypto.FILETYPE_PEM,
+ priv_key_detail,
+ to_bytes(passphrase or ''))
+ except crypto.Error as e:
+ if len(e.args) > 0 and len(e.args[0]) > 0:
+ if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
+ # This happens in case we have the wrong passphrase.
+ if passphrase is not None:
+ raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key!')
+ else:
+ raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
+ raise OpenSSLObjectError('Error while deserializing key: {0}'.format(e))
+ if check_passphrase:
+ # Next we want to make sure that the key is actually protected by
+ # a passphrase (in case we did try the empty string before, make
+ # sure that the key is not protected by the empty string)
+ try:
+ crypto.load_privatekey(crypto.FILETYPE_PEM,
+ priv_key_detail,
+ to_bytes('y' if passphrase == 'x' else 'x'))
+ if passphrase is not None:
+ # Since we can load the key without an exception, the
+ # key isn't password-protected
+ raise OpenSSLBadPassphraseError('Passphrase provided, but private key is not password-protected!')
+ except crypto.Error as e:
+ if passphrase is None and len(e.args) > 0 and len(e.args[0]) > 0:
+ if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
+ # The key is obviously protected by the empty string.
+ # Don't do this at home (if it's possible at all)...
+ raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
+ elif backend == 'cryptography':
+ try:
+ result = load_pem_private_key(priv_key_detail,
+ None if passphrase is None else to_bytes(passphrase),
+ cryptography_backend())
+ except TypeError as dummy:
+ raise OpenSSLBadPassphraseError('Wrong or empty passphrase provided for private key')
+ except ValueError as dummy:
+ raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key')
+
+ return result
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+
+
+def load_certificate(path, content=None, backend='pyopenssl'):
+ """Load the specified certificate."""
+
+ try:
+ if content is None:
+ with open(path, 'rb') as cert_fh:
+ cert_content = cert_fh.read()
+ else:
+ cert_content = content
+ if backend == 'pyopenssl':
+ return crypto.load_certificate(crypto.FILETYPE_PEM, cert_content)
+ elif backend == 'cryptography':
+ return x509.load_pem_x509_certificate(cert_content, cryptography_backend())
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+
+
+def load_certificate_request(path, content=None, backend='pyopenssl'):
+ """Load the specified certificate signing request."""
+ try:
+ if content is None:
+ with open(path, 'rb') as csr_fh:
+ csr_content = csr_fh.read()
+ else:
+ csr_content = content
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+ if backend == 'pyopenssl':
+ return crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_content)
+ elif backend == 'cryptography':
+ return x509.load_pem_x509_csr(csr_content, cryptography_backend())
+
+
+def parse_name_field(input_dict):
+ """Take a dict with key: value or key: list_of_values mappings and return a list of tuples"""
+
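+ # For example, {'CN': 'name', 'OU': ['unit1', 'unit2']} becomes
+ # [('CN', 'name'), ('OU', 'unit1'), ('OU', 'unit2')].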
+ result = []
+ for key in input_dict:
+ if isinstance(input_dict[key], list):
+ for entry in input_dict[key]:
+ result.append((key, entry))
+ else:
+ result.append((key, input_dict[key]))
+ return result
+
+
+def convert_relative_to_datetime(relative_time_string):
+ """Get a datetime.datetime or None from a string in the time format described in sshd_config(5)"""
+
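+ # For example, "+1w3d" yields a datetime 10 days in the future,
+ # and "-4h30m" one 4 hours and 30 minutes in the past.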
+ parsed_result = re.match(
+ r"^(?P<prefix>[+-])((?P<weeks>\d+)[wW])?((?P<days>\d+)[dD])?((?P<hours>\d+)[hH])?((?P<minutes>\d+)[mM])?((?P<seconds>\d+)[sS]?)?$",
+ relative_time_string)
+
+ if parsed_result is None or len(relative_time_string) == 1:
+ # not matched or only a single "+" or "-"
+ return None
+
+ offset = datetime.timedelta(0)
+ if parsed_result.group("weeks") is not None:
+ offset += datetime.timedelta(weeks=int(parsed_result.group("weeks")))
+ if parsed_result.group("days") is not None:
+ offset += datetime.timedelta(days=int(parsed_result.group("days")))
+ if parsed_result.group("hours") is not None:
+ offset += datetime.timedelta(hours=int(parsed_result.group("hours")))
+ if parsed_result.group("minutes") is not None:
+ offset += datetime.timedelta(
+ minutes=int(parsed_result.group("minutes")))
+ if parsed_result.group("seconds") is not None:
+ offset += datetime.timedelta(
+ seconds=int(parsed_result.group("seconds")))
+
+ if parsed_result.group("prefix") == "+":
+ return datetime.datetime.utcnow() + offset
+ else:
+ return datetime.datetime.utcnow() - offset
+
+
+def get_relative_time_option(input_string, input_name, backend='cryptography'):
+ """Return an absolute timespec if a relative timespec or an ASN1 formatted
+ string is provided.
+
+ The return value will be a datetime object for the cryptography backend,
+ and a ASN1 formatted string for the pyopenssl backend."""
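+ # Examples: "+32w1d4h" (relative) or "20191231235959Z" (absolute,
+ # ASN.1 GENERALIZEDTIME).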
+ result = to_native(input_string)
+ if result is None:
+ raise OpenSSLObjectError(
+ 'The timespec "%s" for %s is not valid' %
+ input_string, input_name)
+ # Relative time
+ if result.startswith("+") or result.startswith("-"):
+ result_datetime = convert_relative_to_datetime(result)
+ if result_datetime is None:
+ raise OpenSSLObjectError(
+ 'The timespec "%s" for %s is invalid' %
+ (input_string, input_name)
+ )
+ if backend == 'pyopenssl':
+ return result_datetime.strftime("%Y%m%d%H%M%SZ")
+ elif backend == 'cryptography':
+ return result_datetime
+ # Absolute time
+ if backend == 'pyopenssl':
+ return input_string
+ elif backend == 'cryptography':
+ for date_fmt in ['%Y%m%d%H%M%SZ', '%Y%m%d%H%MZ', '%Y%m%d%H%M%S%z', '%Y%m%d%H%M%z']:
+ try:
+ return datetime.datetime.strptime(result, date_fmt)
+ except ValueError:
+ pass
+
+ raise OpenSSLObjectError(
+ 'The time spec "%s" for %s is invalid' %
+ (input_string, input_name)
+ )
+
+
+def select_message_digest(digest_string):
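+ # Map a digest name to a cryptography hash object; unknown names
+ # yield None so callers can report an unsupported digest.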
+ digest = None
+ if digest_string == 'sha256':
+ digest = hashes.SHA256()
+ elif digest_string == 'sha384':
+ digest = hashes.SHA384()
+ elif digest_string == 'sha512':
+ digest = hashes.SHA512()
+ elif digest_string == 'sha1':
+ digest = hashes.SHA1()
+ elif digest_string == 'md5':
+ digest = hashes.MD5()
+ return digest
+
+
+def write_file(module, content, default_mode=None, path=None):
+ '''
+ Write content into the destination file as securely as possible,
+ honoring the common file arguments (owner, group, mode) from module.
+ '''
+ # Find out parameters for file
+ file_args = module.load_file_common_arguments(module.params, path=path)
+ if file_args['mode'] is None:
+ file_args['mode'] = default_mode
+ # Create tempfile name
+ tmp_fd, tmp_name = tempfile.mkstemp(prefix=b'.ansible_tmp')
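+ # mkstemp() returns an open file descriptor, but only the unique
+ # name is needed; close the descriptor right away and re-open the
+ # file (already created with mode 0o600) for writing below.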
+ try:
+ os.close(tmp_fd)
+ except Exception as dummy:
+ pass
+ module.add_cleanup_file(tmp_name) # if we fail, let Ansible try to remove the file
+ try:
+ try:
+ # Create tempfile
+ fd = os.open(tmp_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ os.write(fd, content)
+ os.close(fd)
+ except Exception as e:
+ try:
+ os.remove(tmp_name)
+ except Exception as dummy:
+ pass
+ module.fail_json(msg='Error while writing result into temporary file: {0}'.format(e))
+ # Update destination to wanted permissions
+ if os.path.exists(file_args['path']):
+ module.set_fs_attributes_if_different(file_args, False)
+ # Move tempfile to final destination
+ module.atomic_move(tmp_name, file_args['path'])
+ # Try to update permissions again
+ module.set_fs_attributes_if_different(file_args, False)
+ except Exception as e:
+ try:
+ os.remove(tmp_name)
+ except Exception as dummy:
+ pass
+ module.fail_json(msg='Error while writing result: {0}'.format(e))
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpenSSLObject(object):
+
+ def __init__(self, path, state, force, check_mode):
+ self.path = path
+ self.state = state
+ self.force = force
+ self.name = os.path.basename(path)
+ self.changed = False
+ self.check_mode = check_mode
+
+ def check(self, module, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ def _check_state():
+ return os.path.exists(self.path)
+
+ def _check_perms(module):
+ file_args = module.load_file_common_arguments(module.params)
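+ # Note: outside check mode, set_fs_attributes_if_different() not
+ # only reports but also applies differing attributes, so calling
+ # check() may itself fix up permissions.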
+ return not module.set_fs_attributes_if_different(file_args, False)
+
+ if not perms_required:
+ return _check_state()
+
+ return _check_state() and _check_perms(module)
+
+ @abc.abstractmethod
+ def dump(self):
+ """Serialize the object into a dictionary."""
+
+ pass
+
+ @abc.abstractmethod
+ def generate(self):
+ """Generate the resource."""
+
+ pass
+
+ def remove(self, module):
+ """Remove the resource from the filesystem."""
+
+ try:
+ os.remove(self.path)
+ self.changed = True
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise OpenSSLObjectError(exc)
+
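+ # A minimal usage sketch (illustrative only, not part of this module):
+ # concrete resource classes subclass OpenSSLObject and implement
+ # generate() and dump(), roughly like this hypothetical example:
+ #
+ # class DummyKey(OpenSSLObject):
+ #     def generate(self, module):
+ #         if self.force or not self.check(module, perms_required=False):
+ #             write_file(module, b'key material here', 0o600)
+ #             self.changed = True
+ #
+ #     def dump(self):
+ #         return {'filename': self.path, 'changed': self.changed}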
+
+# #####################################################################################
+# #####################################################################################
+# This has been extracted from the OpenSSL project's objects.txt:
+# https://github.com/openssl/openssl/blob/9537fe5757bb07761fa275d779bbd40bcf5530e4/crypto/objects/objects.txt
+# Extracted with https://gist.github.com/felixfontein/376748017ad65ead093d56a45a5bf376
+#
+# In case the following data structure has any copyrightable content, note that it is licensed as follows:
+# Copyright (c) the OpenSSL contributors
+# Licensed under the Apache License 2.0
+# https://github.com/openssl/openssl/blob/master/LICENSE
+_OID_MAP = {
+ '0': ('itu-t', 'ITU-T', 'ccitt'),
+ '0.3.4401.5': ('ntt-ds', ),
+ '0.3.4401.5.3.1.9': ('camellia', ),
+ '0.3.4401.5.3.1.9.1': ('camellia-128-ecb', 'CAMELLIA-128-ECB'),
+ '0.3.4401.5.3.1.9.3': ('camellia-128-ofb', 'CAMELLIA-128-OFB'),
+ '0.3.4401.5.3.1.9.4': ('camellia-128-cfb', 'CAMELLIA-128-CFB'),
+ '0.3.4401.5.3.1.9.6': ('camellia-128-gcm', 'CAMELLIA-128-GCM'),
+ '0.3.4401.5.3.1.9.7': ('camellia-128-ccm', 'CAMELLIA-128-CCM'),
+ '0.3.4401.5.3.1.9.9': ('camellia-128-ctr', 'CAMELLIA-128-CTR'),
+ '0.3.4401.5.3.1.9.10': ('camellia-128-cmac', 'CAMELLIA-128-CMAC'),
+ '0.3.4401.5.3.1.9.21': ('camellia-192-ecb', 'CAMELLIA-192-ECB'),
+ '0.3.4401.5.3.1.9.23': ('camellia-192-ofb', 'CAMELLIA-192-OFB'),
+ '0.3.4401.5.3.1.9.24': ('camellia-192-cfb', 'CAMELLIA-192-CFB'),
+ '0.3.4401.5.3.1.9.26': ('camellia-192-gcm', 'CAMELLIA-192-GCM'),
+ '0.3.4401.5.3.1.9.27': ('camellia-192-ccm', 'CAMELLIA-192-CCM'),
+ '0.3.4401.5.3.1.9.29': ('camellia-192-ctr', 'CAMELLIA-192-CTR'),
+ '0.3.4401.5.3.1.9.30': ('camellia-192-cmac', 'CAMELLIA-192-CMAC'),
+ '0.3.4401.5.3.1.9.41': ('camellia-256-ecb', 'CAMELLIA-256-ECB'),
+ '0.3.4401.5.3.1.9.43': ('camellia-256-ofb', 'CAMELLIA-256-OFB'),
+ '0.3.4401.5.3.1.9.44': ('camellia-256-cfb', 'CAMELLIA-256-CFB'),
+ '0.3.4401.5.3.1.9.46': ('camellia-256-gcm', 'CAMELLIA-256-GCM'),
+ '0.3.4401.5.3.1.9.47': ('camellia-256-ccm', 'CAMELLIA-256-CCM'),
+ '0.3.4401.5.3.1.9.49': ('camellia-256-ctr', 'CAMELLIA-256-CTR'),
+ '0.3.4401.5.3.1.9.50': ('camellia-256-cmac', 'CAMELLIA-256-CMAC'),
+ '0.9': ('data', ),
+ '0.9.2342': ('pss', ),
+ '0.9.2342.19200300': ('ucl', ),
+ '0.9.2342.19200300.100': ('pilot', ),
+ '0.9.2342.19200300.100.1': ('pilotAttributeType', ),
+ '0.9.2342.19200300.100.1.1': ('userId', 'UID'),
+ '0.9.2342.19200300.100.1.2': ('textEncodedORAddress', ),
+ '0.9.2342.19200300.100.1.3': ('rfc822Mailbox', 'mail'),
+ '0.9.2342.19200300.100.1.4': ('info', ),
+ '0.9.2342.19200300.100.1.5': ('favouriteDrink', ),
+ '0.9.2342.19200300.100.1.6': ('roomNumber', ),
+ '0.9.2342.19200300.100.1.7': ('photo', ),
+ '0.9.2342.19200300.100.1.8': ('userClass', ),
+ '0.9.2342.19200300.100.1.9': ('host', ),
+ '0.9.2342.19200300.100.1.10': ('manager', ),
+ '0.9.2342.19200300.100.1.11': ('documentIdentifier', ),
+ '0.9.2342.19200300.100.1.12': ('documentTitle', ),
+ '0.9.2342.19200300.100.1.13': ('documentVersion', ),
+ '0.9.2342.19200300.100.1.14': ('documentAuthor', ),
+ '0.9.2342.19200300.100.1.15': ('documentLocation', ),
+ '0.9.2342.19200300.100.1.20': ('homeTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.21': ('secretary', ),
+ '0.9.2342.19200300.100.1.22': ('otherMailbox', ),
+ '0.9.2342.19200300.100.1.23': ('lastModifiedTime', ),
+ '0.9.2342.19200300.100.1.24': ('lastModifiedBy', ),
+ '0.9.2342.19200300.100.1.25': ('domainComponent', 'DC'),
+ '0.9.2342.19200300.100.1.26': ('aRecord', ),
+ '0.9.2342.19200300.100.1.27': ('pilotAttributeType27', ),
+ '0.9.2342.19200300.100.1.28': ('mXRecord', ),
+ '0.9.2342.19200300.100.1.29': ('nSRecord', ),
+ '0.9.2342.19200300.100.1.30': ('sOARecord', ),
+ '0.9.2342.19200300.100.1.31': ('cNAMERecord', ),
+ '0.9.2342.19200300.100.1.37': ('associatedDomain', ),
+ '0.9.2342.19200300.100.1.38': ('associatedName', ),
+ '0.9.2342.19200300.100.1.39': ('homePostalAddress', ),
+ '0.9.2342.19200300.100.1.40': ('personalTitle', ),
+ '0.9.2342.19200300.100.1.41': ('mobileTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.42': ('pagerTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.43': ('friendlyCountryName', ),
+ '0.9.2342.19200300.100.1.44': ('uniqueIdentifier', 'uid'),
+ '0.9.2342.19200300.100.1.45': ('organizationalStatus', ),
+ '0.9.2342.19200300.100.1.46': ('janetMailbox', ),
+ '0.9.2342.19200300.100.1.47': ('mailPreferenceOption', ),
+ '0.9.2342.19200300.100.1.48': ('buildingName', ),
+ '0.9.2342.19200300.100.1.49': ('dSAQuality', ),
+ '0.9.2342.19200300.100.1.50': ('singleLevelQuality', ),
+ '0.9.2342.19200300.100.1.51': ('subtreeMinimumQuality', ),
+ '0.9.2342.19200300.100.1.52': ('subtreeMaximumQuality', ),
+ '0.9.2342.19200300.100.1.53': ('personalSignature', ),
+ '0.9.2342.19200300.100.1.54': ('dITRedirect', ),
+ '0.9.2342.19200300.100.1.55': ('audio', ),
+ '0.9.2342.19200300.100.1.56': ('documentPublisher', ),
+ '0.9.2342.19200300.100.3': ('pilotAttributeSyntax', ),
+ '0.9.2342.19200300.100.3.4': ('iA5StringSyntax', ),
+ '0.9.2342.19200300.100.3.5': ('caseIgnoreIA5StringSyntax', ),
+ '0.9.2342.19200300.100.4': ('pilotObjectClass', ),
+ '0.9.2342.19200300.100.4.3': ('pilotObject', ),
+ '0.9.2342.19200300.100.4.4': ('pilotPerson', ),
+ '0.9.2342.19200300.100.4.5': ('account', ),
+ '0.9.2342.19200300.100.4.6': ('document', ),
+ '0.9.2342.19200300.100.4.7': ('room', ),
+ '0.9.2342.19200300.100.4.9': ('documentSeries', ),
+ '0.9.2342.19200300.100.4.13': ('Domain', 'domain'),
+ '0.9.2342.19200300.100.4.14': ('rFC822localPart', ),
+ '0.9.2342.19200300.100.4.15': ('dNSDomain', ),
+ '0.9.2342.19200300.100.4.17': ('domainRelatedObject', ),
+ '0.9.2342.19200300.100.4.18': ('friendlyCountry', ),
+ '0.9.2342.19200300.100.4.19': ('simpleSecurityObject', ),
+ '0.9.2342.19200300.100.4.20': ('pilotOrganization', ),
+ '0.9.2342.19200300.100.4.21': ('pilotDSA', ),
+ '0.9.2342.19200300.100.4.22': ('qualityLabelledData', ),
+ '0.9.2342.19200300.100.10': ('pilotGroups', ),
+ '1': ('iso', 'ISO'),
+ '1.0.9797.3.4': ('gmac', 'GMAC'),
+ '1.0.10118.3.0.55': ('whirlpool', ),
+ '1.2': ('ISO Member Body', 'member-body'),
+ '1.2.156': ('ISO CN Member Body', 'ISO-CN'),
+ '1.2.156.10197': ('oscca', ),
+ '1.2.156.10197.1': ('sm-scheme', ),
+ '1.2.156.10197.1.104.1': ('sm4-ecb', 'SM4-ECB'),
+ '1.2.156.10197.1.104.2': ('sm4-cbc', 'SM4-CBC'),
+ '1.2.156.10197.1.104.3': ('sm4-ofb', 'SM4-OFB'),
+ '1.2.156.10197.1.104.4': ('sm4-cfb', 'SM4-CFB'),
+ '1.2.156.10197.1.104.5': ('sm4-cfb1', 'SM4-CFB1'),
+ '1.2.156.10197.1.104.6': ('sm4-cfb8', 'SM4-CFB8'),
+ '1.2.156.10197.1.104.7': ('sm4-ctr', 'SM4-CTR'),
+ '1.2.156.10197.1.301': ('sm2', 'SM2'),
+ '1.2.156.10197.1.401': ('sm3', 'SM3'),
+ '1.2.156.10197.1.501': ('SM2-with-SM3', 'SM2-SM3'),
+ '1.2.156.10197.1.504': ('sm3WithRSAEncryption', 'RSA-SM3'),
+ '1.2.392.200011.61.1.1.1.2': ('camellia-128-cbc', 'CAMELLIA-128-CBC'),
+ '1.2.392.200011.61.1.1.1.3': ('camellia-192-cbc', 'CAMELLIA-192-CBC'),
+ '1.2.392.200011.61.1.1.1.4': ('camellia-256-cbc', 'CAMELLIA-256-CBC'),
+ '1.2.392.200011.61.1.1.3.2': ('id-camellia128-wrap', ),
+ '1.2.392.200011.61.1.1.3.3': ('id-camellia192-wrap', ),
+ '1.2.392.200011.61.1.1.3.4': ('id-camellia256-wrap', ),
+ '1.2.410.200004': ('kisa', 'KISA'),
+ '1.2.410.200004.1.3': ('seed-ecb', 'SEED-ECB'),
+ '1.2.410.200004.1.4': ('seed-cbc', 'SEED-CBC'),
+ '1.2.410.200004.1.5': ('seed-cfb', 'SEED-CFB'),
+ '1.2.410.200004.1.6': ('seed-ofb', 'SEED-OFB'),
+ '1.2.410.200046.1.1': ('aria', ),
+ '1.2.410.200046.1.1.1': ('aria-128-ecb', 'ARIA-128-ECB'),
+ '1.2.410.200046.1.1.2': ('aria-128-cbc', 'ARIA-128-CBC'),
+ '1.2.410.200046.1.1.3': ('aria-128-cfb', 'ARIA-128-CFB'),
+ '1.2.410.200046.1.1.4': ('aria-128-ofb', 'ARIA-128-OFB'),
+ '1.2.410.200046.1.1.5': ('aria-128-ctr', 'ARIA-128-CTR'),
+ '1.2.410.200046.1.1.6': ('aria-192-ecb', 'ARIA-192-ECB'),
+ '1.2.410.200046.1.1.7': ('aria-192-cbc', 'ARIA-192-CBC'),
+ '1.2.410.200046.1.1.8': ('aria-192-cfb', 'ARIA-192-CFB'),
+ '1.2.410.200046.1.1.9': ('aria-192-ofb', 'ARIA-192-OFB'),
+ '1.2.410.200046.1.1.10': ('aria-192-ctr', 'ARIA-192-CTR'),
+ '1.2.410.200046.1.1.11': ('aria-256-ecb', 'ARIA-256-ECB'),
+ '1.2.410.200046.1.1.12': ('aria-256-cbc', 'ARIA-256-CBC'),
+ '1.2.410.200046.1.1.13': ('aria-256-cfb', 'ARIA-256-CFB'),
+ '1.2.410.200046.1.1.14': ('aria-256-ofb', 'ARIA-256-OFB'),
+ '1.2.410.200046.1.1.15': ('aria-256-ctr', 'ARIA-256-CTR'),
+ '1.2.410.200046.1.1.34': ('aria-128-gcm', 'ARIA-128-GCM'),
+ '1.2.410.200046.1.1.35': ('aria-192-gcm', 'ARIA-192-GCM'),
+ '1.2.410.200046.1.1.36': ('aria-256-gcm', 'ARIA-256-GCM'),
+ '1.2.410.200046.1.1.37': ('aria-128-ccm', 'ARIA-128-CCM'),
+ '1.2.410.200046.1.1.38': ('aria-192-ccm', 'ARIA-192-CCM'),
+ '1.2.410.200046.1.1.39': ('aria-256-ccm', 'ARIA-256-CCM'),
+ '1.2.643.2.2': ('cryptopro', ),
+ '1.2.643.2.2.3': ('GOST R 34.11-94 with GOST R 34.10-2001', 'id-GostR3411-94-with-GostR3410-2001'),
+ '1.2.643.2.2.4': ('GOST R 34.11-94 with GOST R 34.10-94', 'id-GostR3411-94-with-GostR3410-94'),
+ '1.2.643.2.2.9': ('GOST R 34.11-94', 'md_gost94'),
+ '1.2.643.2.2.10': ('HMAC GOST 34.11-94', 'id-HMACGostR3411-94'),
+ '1.2.643.2.2.14.0': ('id-Gost28147-89-None-KeyMeshing', ),
+ '1.2.643.2.2.14.1': ('id-Gost28147-89-CryptoPro-KeyMeshing', ),
+ '1.2.643.2.2.19': ('GOST R 34.10-2001', 'gost2001'),
+ '1.2.643.2.2.20': ('GOST R 34.10-94', 'gost94'),
+ '1.2.643.2.2.20.1': ('id-GostR3410-94-a', ),
+ '1.2.643.2.2.20.2': ('id-GostR3410-94-aBis', ),
+ '1.2.643.2.2.20.3': ('id-GostR3410-94-b', ),
+ '1.2.643.2.2.20.4': ('id-GostR3410-94-bBis', ),
+ '1.2.643.2.2.21': ('GOST 28147-89', 'gost89'),
+ '1.2.643.2.2.22': ('GOST 28147-89 MAC', 'gost-mac'),
+ '1.2.643.2.2.23': ('GOST R 34.11-94 PRF', 'prf-gostr3411-94'),
+ '1.2.643.2.2.30.0': ('id-GostR3411-94-TestParamSet', ),
+ '1.2.643.2.2.30.1': ('id-GostR3411-94-CryptoProParamSet', ),
+ '1.2.643.2.2.31.0': ('id-Gost28147-89-TestParamSet', ),
+ '1.2.643.2.2.31.1': ('id-Gost28147-89-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.31.2': ('id-Gost28147-89-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.31.3': ('id-Gost28147-89-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.31.4': ('id-Gost28147-89-CryptoPro-D-ParamSet', ),
+ '1.2.643.2.2.31.5': ('id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet', ),
+ '1.2.643.2.2.31.6': ('id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet', ),
+ '1.2.643.2.2.31.7': ('id-Gost28147-89-CryptoPro-RIC-1-ParamSet', ),
+ '1.2.643.2.2.32.0': ('id-GostR3410-94-TestParamSet', ),
+ '1.2.643.2.2.32.2': ('id-GostR3410-94-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.32.3': ('id-GostR3410-94-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.32.4': ('id-GostR3410-94-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.32.5': ('id-GostR3410-94-CryptoPro-D-ParamSet', ),
+ '1.2.643.2.2.33.1': ('id-GostR3410-94-CryptoPro-XchA-ParamSet', ),
+ '1.2.643.2.2.33.2': ('id-GostR3410-94-CryptoPro-XchB-ParamSet', ),
+ '1.2.643.2.2.33.3': ('id-GostR3410-94-CryptoPro-XchC-ParamSet', ),
+ '1.2.643.2.2.35.0': ('id-GostR3410-2001-TestParamSet', ),
+ '1.2.643.2.2.35.1': ('id-GostR3410-2001-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.35.2': ('id-GostR3410-2001-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.35.3': ('id-GostR3410-2001-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.36.0': ('id-GostR3410-2001-CryptoPro-XchA-ParamSet', ),
+ '1.2.643.2.2.36.1': ('id-GostR3410-2001-CryptoPro-XchB-ParamSet', ),
+ '1.2.643.2.2.98': ('GOST R 34.10-2001 DH', 'id-GostR3410-2001DH'),
+ '1.2.643.2.2.99': ('GOST R 34.10-94 DH', 'id-GostR3410-94DH'),
+ '1.2.643.2.9': ('cryptocom', ),
+ '1.2.643.2.9.1.3.3': ('GOST R 34.11-94 with GOST R 34.10-94 Cryptocom', 'id-GostR3411-94-with-GostR3410-94-cc'),
+ '1.2.643.2.9.1.3.4': ('GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom', 'id-GostR3411-94-with-GostR3410-2001-cc'),
+ '1.2.643.2.9.1.5.3': ('GOST 34.10-94 Cryptocom', 'gost94cc'),
+ '1.2.643.2.9.1.5.4': ('GOST 34.10-2001 Cryptocom', 'gost2001cc'),
+ '1.2.643.2.9.1.6.1': ('GOST 28147-89 Cryptocom ParamSet', 'id-Gost28147-89-cc'),
+ '1.2.643.2.9.1.8.1': ('GOST R 3410-2001 Parameter Set Cryptocom', 'id-GostR3410-2001-ParamSet-cc'),
+ '1.2.643.3.131.1.1': ('INN', 'INN'),
+ '1.2.643.7.1': ('id-tc26', ),
+ '1.2.643.7.1.1': ('id-tc26-algorithms', ),
+ '1.2.643.7.1.1.1': ('id-tc26-sign', ),
+ '1.2.643.7.1.1.1.1': ('GOST R 34.10-2012 with 256 bit modulus', 'gost2012_256'),
+ '1.2.643.7.1.1.1.2': ('GOST R 34.10-2012 with 512 bit modulus', 'gost2012_512'),
+ '1.2.643.7.1.1.2': ('id-tc26-digest', ),
+ '1.2.643.7.1.1.2.2': ('GOST R 34.11-2012 with 256 bit hash', 'md_gost12_256'),
+ '1.2.643.7.1.1.2.3': ('GOST R 34.11-2012 with 512 bit hash', 'md_gost12_512'),
+ '1.2.643.7.1.1.3': ('id-tc26-signwithdigest', ),
+ '1.2.643.7.1.1.3.2': ('GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)', 'id-tc26-signwithdigest-gost3410-2012-256'),
+ '1.2.643.7.1.1.3.3': ('GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)', 'id-tc26-signwithdigest-gost3410-2012-512'),
+ '1.2.643.7.1.1.4': ('id-tc26-mac', ),
+ '1.2.643.7.1.1.4.1': ('HMAC GOST 34.11-2012 256 bit', 'id-tc26-hmac-gost-3411-2012-256'),
+ '1.2.643.7.1.1.4.2': ('HMAC GOST 34.11-2012 512 bit', 'id-tc26-hmac-gost-3411-2012-512'),
+ '1.2.643.7.1.1.5': ('id-tc26-cipher', ),
+ '1.2.643.7.1.1.5.1': ('id-tc26-cipher-gostr3412-2015-magma', ),
+ '1.2.643.7.1.1.5.1.1': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm', ),
+ '1.2.643.7.1.1.5.1.2': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm-omac', ),
+ '1.2.643.7.1.1.5.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik', ),
+ '1.2.643.7.1.1.5.2.1': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm', ),
+ '1.2.643.7.1.1.5.2.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm-omac', ),
+ '1.2.643.7.1.1.6': ('id-tc26-agreement', ),
+ '1.2.643.7.1.1.6.1': ('id-tc26-agreement-gost-3410-2012-256', ),
+ '1.2.643.7.1.1.6.2': ('id-tc26-agreement-gost-3410-2012-512', ),
+ '1.2.643.7.1.1.7': ('id-tc26-wrap', ),
+ '1.2.643.7.1.1.7.1': ('id-tc26-wrap-gostr3412-2015-magma', ),
+ '1.2.643.7.1.1.7.1.1': ('id-tc26-wrap-gostr3412-2015-magma-kexp15', 'id-tc26-wrap-gostr3412-2015-kuznyechik-kexp15'),
+ '1.2.643.7.1.1.7.2': ('id-tc26-wrap-gostr3412-2015-kuznyechik', ),
+ '1.2.643.7.1.2': ('id-tc26-constants', ),
+ '1.2.643.7.1.2.1': ('id-tc26-sign-constants', ),
+ '1.2.643.7.1.2.1.1': ('id-tc26-gost-3410-2012-256-constants', ),
+ '1.2.643.7.1.2.1.1.1': ('GOST R 34.10-2012 (256 bit) ParamSet A', 'id-tc26-gost-3410-2012-256-paramSetA'),
+ '1.2.643.7.1.2.1.1.2': ('GOST R 34.10-2012 (256 bit) ParamSet B', 'id-tc26-gost-3410-2012-256-paramSetB'),
+ '1.2.643.7.1.2.1.1.3': ('GOST R 34.10-2012 (256 bit) ParamSet C', 'id-tc26-gost-3410-2012-256-paramSetC'),
+ '1.2.643.7.1.2.1.1.4': ('GOST R 34.10-2012 (256 bit) ParamSet D', 'id-tc26-gost-3410-2012-256-paramSetD'),
+ '1.2.643.7.1.2.1.2': ('id-tc26-gost-3410-2012-512-constants', ),
+ '1.2.643.7.1.2.1.2.0': ('GOST R 34.10-2012 (512 bit) testing parameter set', 'id-tc26-gost-3410-2012-512-paramSetTest'),
+ '1.2.643.7.1.2.1.2.1': ('GOST R 34.10-2012 (512 bit) ParamSet A', 'id-tc26-gost-3410-2012-512-paramSetA'),
+ '1.2.643.7.1.2.1.2.2': ('GOST R 34.10-2012 (512 bit) ParamSet B', 'id-tc26-gost-3410-2012-512-paramSetB'),
+ '1.2.643.7.1.2.1.2.3': ('GOST R 34.10-2012 (512 bit) ParamSet C', 'id-tc26-gost-3410-2012-512-paramSetC'),
+ '1.2.643.7.1.2.2': ('id-tc26-digest-constants', ),
+ '1.2.643.7.1.2.5': ('id-tc26-cipher-constants', ),
+ '1.2.643.7.1.2.5.1': ('id-tc26-gost-28147-constants', ),
+ '1.2.643.7.1.2.5.1.1': ('GOST 28147-89 TC26 parameter set', 'id-tc26-gost-28147-param-Z'),
+ '1.2.643.100.1': ('OGRN', 'OGRN'),
+ '1.2.643.100.3': ('SNILS', 'SNILS'),
+ '1.2.643.100.111': ('Signing Tool of Subject', 'subjectSignTool'),
+ '1.2.643.100.112': ('Signing Tool of Issuer', 'issuerSignTool'),
+ '1.2.804': ('ISO-UA', ),
+ '1.2.804.2.1.1.1': ('ua-pki', ),
+ '1.2.804.2.1.1.1.1.1.1': ('DSTU Gost 28147-2009', 'dstu28147'),
+ '1.2.804.2.1.1.1.1.1.1.2': ('DSTU Gost 28147-2009 OFB mode', 'dstu28147-ofb'),
+ '1.2.804.2.1.1.1.1.1.1.3': ('DSTU Gost 28147-2009 CFB mode', 'dstu28147-cfb'),
+ '1.2.804.2.1.1.1.1.1.1.5': ('DSTU Gost 28147-2009 key wrap', 'dstu28147-wrap'),
+ '1.2.804.2.1.1.1.1.1.2': ('HMAC DSTU Gost 34311-95', 'hmacWithDstu34311'),
+ '1.2.804.2.1.1.1.1.2.1': ('DSTU Gost 34311-95', 'dstu34311'),
+ '1.2.804.2.1.1.1.1.3.1.1': ('DSTU 4145-2002 little endian', 'dstu4145le'),
+ '1.2.804.2.1.1.1.1.3.1.1.1.1': ('DSTU 4145-2002 big endian', 'dstu4145be'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.0': ('DSTU curve 0', 'uacurve0'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.1': ('DSTU curve 1', 'uacurve1'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.2': ('DSTU curve 2', 'uacurve2'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.3': ('DSTU curve 3', 'uacurve3'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.4': ('DSTU curve 4', 'uacurve4'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.5': ('DSTU curve 5', 'uacurve5'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.6': ('DSTU curve 6', 'uacurve6'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.7': ('DSTU curve 7', 'uacurve7'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.8': ('DSTU curve 8', 'uacurve8'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.9': ('DSTU curve 9', 'uacurve9'),
+ '1.2.840': ('ISO US Member Body', 'ISO-US'),
+ '1.2.840.10040': ('X9.57', 'X9-57'),
+ '1.2.840.10040.2': ('holdInstruction', ),
+ '1.2.840.10040.2.1': ('Hold Instruction None', 'holdInstructionNone'),
+ '1.2.840.10040.2.2': ('Hold Instruction Call Issuer', 'holdInstructionCallIssuer'),
+ '1.2.840.10040.2.3': ('Hold Instruction Reject', 'holdInstructionReject'),
+ '1.2.840.10040.4': ('X9.57 CM ?', 'X9cm'),
+ '1.2.840.10040.4.1': ('dsaEncryption', 'DSA'),
+ '1.2.840.10040.4.3': ('dsaWithSHA1', 'DSA-SHA1'),
+ '1.2.840.10045': ('ANSI X9.62', 'ansi-X9-62'),
+ '1.2.840.10045.1': ('id-fieldType', ),
+ '1.2.840.10045.1.1': ('prime-field', ),
+ '1.2.840.10045.1.2': ('characteristic-two-field', ),
+ '1.2.840.10045.1.2.3': ('id-characteristic-two-basis', ),
+ '1.2.840.10045.1.2.3.1': ('onBasis', ),
+ '1.2.840.10045.1.2.3.2': ('tpBasis', ),
+ '1.2.840.10045.1.2.3.3': ('ppBasis', ),
+ '1.2.840.10045.2': ('id-publicKeyType', ),
+ '1.2.840.10045.2.1': ('id-ecPublicKey', ),
+ '1.2.840.10045.3': ('ellipticCurve', ),
+ '1.2.840.10045.3.0': ('c-TwoCurve', ),
+ '1.2.840.10045.3.0.1': ('c2pnb163v1', ),
+ '1.2.840.10045.3.0.2': ('c2pnb163v2', ),
+ '1.2.840.10045.3.0.3': ('c2pnb163v3', ),
+ '1.2.840.10045.3.0.4': ('c2pnb176v1', ),
+ '1.2.840.10045.3.0.5': ('c2tnb191v1', ),
+ '1.2.840.10045.3.0.6': ('c2tnb191v2', ),
+ '1.2.840.10045.3.0.7': ('c2tnb191v3', ),
+ '1.2.840.10045.3.0.8': ('c2onb191v4', ),
+ '1.2.840.10045.3.0.9': ('c2onb191v5', ),
+ '1.2.840.10045.3.0.10': ('c2pnb208w1', ),
+ '1.2.840.10045.3.0.11': ('c2tnb239v1', ),
+ '1.2.840.10045.3.0.12': ('c2tnb239v2', ),
+ '1.2.840.10045.3.0.13': ('c2tnb239v3', ),
+ '1.2.840.10045.3.0.14': ('c2onb239v4', ),
+ '1.2.840.10045.3.0.15': ('c2onb239v5', ),
+ '1.2.840.10045.3.0.16': ('c2pnb272w1', ),
+ '1.2.840.10045.3.0.17': ('c2pnb304w1', ),
+ '1.2.840.10045.3.0.18': ('c2tnb359v1', ),
+ '1.2.840.10045.3.0.19': ('c2pnb368w1', ),
+ '1.2.840.10045.3.0.20': ('c2tnb431r1', ),
+ '1.2.840.10045.3.1': ('primeCurve', ),
+ '1.2.840.10045.3.1.1': ('prime192v1', ),
+ '1.2.840.10045.3.1.2': ('prime192v2', ),
+ '1.2.840.10045.3.1.3': ('prime192v3', ),
+ '1.2.840.10045.3.1.4': ('prime239v1', ),
+ '1.2.840.10045.3.1.5': ('prime239v2', ),
+ '1.2.840.10045.3.1.6': ('prime239v3', ),
+ '1.2.840.10045.3.1.7': ('prime256v1', ),
+ '1.2.840.10045.4': ('id-ecSigType', ),
+ '1.2.840.10045.4.1': ('ecdsa-with-SHA1', ),
+ '1.2.840.10045.4.2': ('ecdsa-with-Recommended', ),
+ '1.2.840.10045.4.3': ('ecdsa-with-Specified', ),
+ '1.2.840.10045.4.3.1': ('ecdsa-with-SHA224', ),
+ '1.2.840.10045.4.3.2': ('ecdsa-with-SHA256', ),
+ '1.2.840.10045.4.3.3': ('ecdsa-with-SHA384', ),
+ '1.2.840.10045.4.3.4': ('ecdsa-with-SHA512', ),
+ '1.2.840.10046.2.1': ('X9.42 DH', 'dhpublicnumber'),
+ '1.2.840.113533.7.66.10': ('cast5-cbc', 'CAST5-CBC'),
+ '1.2.840.113533.7.66.12': ('pbeWithMD5AndCast5CBC', ),
+ '1.2.840.113533.7.66.13': ('password based MAC', 'id-PasswordBasedMAC'),
+ '1.2.840.113533.7.66.30': ('Diffie-Hellman based MAC', 'id-DHBasedMac'),
+ '1.2.840.113549': ('RSA Data Security, Inc.', 'rsadsi'),
+ '1.2.840.113549.1': ('RSA Data Security, Inc. PKCS', 'pkcs'),
+ '1.2.840.113549.1.1': ('pkcs1', ),
+ '1.2.840.113549.1.1.1': ('rsaEncryption', ),
+ '1.2.840.113549.1.1.2': ('md2WithRSAEncryption', 'RSA-MD2'),
+ '1.2.840.113549.1.1.3': ('md4WithRSAEncryption', 'RSA-MD4'),
+ '1.2.840.113549.1.1.4': ('md5WithRSAEncryption', 'RSA-MD5'),
+ '1.2.840.113549.1.1.5': ('sha1WithRSAEncryption', 'RSA-SHA1'),
+ '1.2.840.113549.1.1.6': ('rsaOAEPEncryptionSET', ),
+ '1.2.840.113549.1.1.7': ('rsaesOaep', 'RSAES-OAEP'),
+ '1.2.840.113549.1.1.8': ('mgf1', 'MGF1'),
+ '1.2.840.113549.1.1.9': ('pSpecified', 'PSPECIFIED'),
+ '1.2.840.113549.1.1.10': ('rsassaPss', 'RSASSA-PSS'),
+ '1.2.840.113549.1.1.11': ('sha256WithRSAEncryption', 'RSA-SHA256'),
+ '1.2.840.113549.1.1.12': ('sha384WithRSAEncryption', 'RSA-SHA384'),
+ '1.2.840.113549.1.1.13': ('sha512WithRSAEncryption', 'RSA-SHA512'),
+ '1.2.840.113549.1.1.14': ('sha224WithRSAEncryption', 'RSA-SHA224'),
+ '1.2.840.113549.1.1.15': ('sha512-224WithRSAEncryption', 'RSA-SHA512/224'),
+ '1.2.840.113549.1.1.16': ('sha512-256WithRSAEncryption', 'RSA-SHA512/256'),
+ '1.2.840.113549.1.3': ('pkcs3', ),
+ '1.2.840.113549.1.3.1': ('dhKeyAgreement', ),
+ '1.2.840.113549.1.5': ('pkcs5', ),
+ '1.2.840.113549.1.5.1': ('pbeWithMD2AndDES-CBC', 'PBE-MD2-DES'),
+ '1.2.840.113549.1.5.3': ('pbeWithMD5AndDES-CBC', 'PBE-MD5-DES'),
+ '1.2.840.113549.1.5.4': ('pbeWithMD2AndRC2-CBC', 'PBE-MD2-RC2-64'),
+ '1.2.840.113549.1.5.6': ('pbeWithMD5AndRC2-CBC', 'PBE-MD5-RC2-64'),
+ '1.2.840.113549.1.5.10': ('pbeWithSHA1AndDES-CBC', 'PBE-SHA1-DES'),
+ '1.2.840.113549.1.5.11': ('pbeWithSHA1AndRC2-CBC', 'PBE-SHA1-RC2-64'),
+ '1.2.840.113549.1.5.12': ('PBKDF2', ),
+ '1.2.840.113549.1.5.13': ('PBES2', ),
+ '1.2.840.113549.1.5.14': ('PBMAC1', ),
+ '1.2.840.113549.1.7': ('pkcs7', ),
+ '1.2.840.113549.1.7.1': ('pkcs7-data', ),
+ '1.2.840.113549.1.7.2': ('pkcs7-signedData', ),
+ '1.2.840.113549.1.7.3': ('pkcs7-envelopedData', ),
+ '1.2.840.113549.1.7.4': ('pkcs7-signedAndEnvelopedData', ),
+ '1.2.840.113549.1.7.5': ('pkcs7-digestData', ),
+ '1.2.840.113549.1.7.6': ('pkcs7-encryptedData', ),
+ '1.2.840.113549.1.9': ('pkcs9', ),
+ '1.2.840.113549.1.9.1': ('emailAddress', ),
+ '1.2.840.113549.1.9.2': ('unstructuredName', ),
+ '1.2.840.113549.1.9.3': ('contentType', ),
+ '1.2.840.113549.1.9.4': ('messageDigest', ),
+ '1.2.840.113549.1.9.5': ('signingTime', ),
+ '1.2.840.113549.1.9.6': ('countersignature', ),
+ '1.2.840.113549.1.9.7': ('challengePassword', ),
+ '1.2.840.113549.1.9.8': ('unstructuredAddress', ),
+ '1.2.840.113549.1.9.9': ('extendedCertificateAttributes', ),
+ '1.2.840.113549.1.9.14': ('Extension Request', 'extReq'),
+ '1.2.840.113549.1.9.15': ('S/MIME Capabilities', 'SMIME-CAPS'),
+ '1.2.840.113549.1.9.16': ('S/MIME', 'SMIME'),
+ '1.2.840.113549.1.9.16.0': ('id-smime-mod', ),
+ '1.2.840.113549.1.9.16.0.1': ('id-smime-mod-cms', ),
+ '1.2.840.113549.1.9.16.0.2': ('id-smime-mod-ess', ),
+ '1.2.840.113549.1.9.16.0.3': ('id-smime-mod-oid', ),
+ '1.2.840.113549.1.9.16.0.4': ('id-smime-mod-msg-v3', ),
+ '1.2.840.113549.1.9.16.0.5': ('id-smime-mod-ets-eSignature-88', ),
+ '1.2.840.113549.1.9.16.0.6': ('id-smime-mod-ets-eSignature-97', ),
+ '1.2.840.113549.1.9.16.0.7': ('id-smime-mod-ets-eSigPolicy-88', ),
+ '1.2.840.113549.1.9.16.0.8': ('id-smime-mod-ets-eSigPolicy-97', ),
+ '1.2.840.113549.1.9.16.1': ('id-smime-ct', ),
+ '1.2.840.113549.1.9.16.1.1': ('id-smime-ct-receipt', ),
+ '1.2.840.113549.1.9.16.1.2': ('id-smime-ct-authData', ),
+ '1.2.840.113549.1.9.16.1.3': ('id-smime-ct-publishCert', ),
+ '1.2.840.113549.1.9.16.1.4': ('id-smime-ct-TSTInfo', ),
+ '1.2.840.113549.1.9.16.1.5': ('id-smime-ct-TDTInfo', ),
+ '1.2.840.113549.1.9.16.1.6': ('id-smime-ct-contentInfo', ),
+ '1.2.840.113549.1.9.16.1.7': ('id-smime-ct-DVCSRequestData', ),
+ '1.2.840.113549.1.9.16.1.8': ('id-smime-ct-DVCSResponseData', ),
+ '1.2.840.113549.1.9.16.1.9': ('id-smime-ct-compressedData', ),
+ '1.2.840.113549.1.9.16.1.19': ('id-smime-ct-contentCollection', ),
+ '1.2.840.113549.1.9.16.1.23': ('id-smime-ct-authEnvelopedData', ),
+ '1.2.840.113549.1.9.16.1.27': ('id-ct-asciiTextWithCRLF', ),
+ '1.2.840.113549.1.9.16.1.28': ('id-ct-xml', ),
+ '1.2.840.113549.1.9.16.2': ('id-smime-aa', ),
+ '1.2.840.113549.1.9.16.2.1': ('id-smime-aa-receiptRequest', ),
+ '1.2.840.113549.1.9.16.2.2': ('id-smime-aa-securityLabel', ),
+ '1.2.840.113549.1.9.16.2.3': ('id-smime-aa-mlExpandHistory', ),
+ '1.2.840.113549.1.9.16.2.4': ('id-smime-aa-contentHint', ),
+ '1.2.840.113549.1.9.16.2.5': ('id-smime-aa-msgSigDigest', ),
+ '1.2.840.113549.1.9.16.2.6': ('id-smime-aa-encapContentType', ),
+ '1.2.840.113549.1.9.16.2.7': ('id-smime-aa-contentIdentifier', ),
+ '1.2.840.113549.1.9.16.2.8': ('id-smime-aa-macValue', ),
+ '1.2.840.113549.1.9.16.2.9': ('id-smime-aa-equivalentLabels', ),
+ '1.2.840.113549.1.9.16.2.10': ('id-smime-aa-contentReference', ),
+ '1.2.840.113549.1.9.16.2.11': ('id-smime-aa-encrypKeyPref', ),
+ '1.2.840.113549.1.9.16.2.12': ('id-smime-aa-signingCertificate', ),
+ '1.2.840.113549.1.9.16.2.13': ('id-smime-aa-smimeEncryptCerts', ),
+ '1.2.840.113549.1.9.16.2.14': ('id-smime-aa-timeStampToken', ),
+ '1.2.840.113549.1.9.16.2.15': ('id-smime-aa-ets-sigPolicyId', ),
+ '1.2.840.113549.1.9.16.2.16': ('id-smime-aa-ets-commitmentType', ),
+ '1.2.840.113549.1.9.16.2.17': ('id-smime-aa-ets-signerLocation', ),
+ '1.2.840.113549.1.9.16.2.18': ('id-smime-aa-ets-signerAttr', ),
+ '1.2.840.113549.1.9.16.2.19': ('id-smime-aa-ets-otherSigCert', ),
+ '1.2.840.113549.1.9.16.2.20': ('id-smime-aa-ets-contentTimestamp', ),
+ '1.2.840.113549.1.9.16.2.21': ('id-smime-aa-ets-CertificateRefs', ),
+ '1.2.840.113549.1.9.16.2.22': ('id-smime-aa-ets-RevocationRefs', ),
+ '1.2.840.113549.1.9.16.2.23': ('id-smime-aa-ets-certValues', ),
+ '1.2.840.113549.1.9.16.2.24': ('id-smime-aa-ets-revocationValues', ),
+ '1.2.840.113549.1.9.16.2.25': ('id-smime-aa-ets-escTimeStamp', ),
+ '1.2.840.113549.1.9.16.2.26': ('id-smime-aa-ets-certCRLTimestamp', ),
+ '1.2.840.113549.1.9.16.2.27': ('id-smime-aa-ets-archiveTimeStamp', ),
+ '1.2.840.113549.1.9.16.2.28': ('id-smime-aa-signatureType', ),
+ '1.2.840.113549.1.9.16.2.29': ('id-smime-aa-dvcs-dvc', ),
+ '1.2.840.113549.1.9.16.2.47': ('id-smime-aa-signingCertificateV2', ),
+ '1.2.840.113549.1.9.16.3': ('id-smime-alg', ),
+ '1.2.840.113549.1.9.16.3.1': ('id-smime-alg-ESDHwith3DES', ),
+ '1.2.840.113549.1.9.16.3.2': ('id-smime-alg-ESDHwithRC2', ),
+ '1.2.840.113549.1.9.16.3.3': ('id-smime-alg-3DESwrap', ),
+ '1.2.840.113549.1.9.16.3.4': ('id-smime-alg-RC2wrap', ),
+ '1.2.840.113549.1.9.16.3.5': ('id-smime-alg-ESDH', ),
+ '1.2.840.113549.1.9.16.3.6': ('id-smime-alg-CMS3DESwrap', ),
+ '1.2.840.113549.1.9.16.3.7': ('id-smime-alg-CMSRC2wrap', ),
+ '1.2.840.113549.1.9.16.3.8': ('zlib compression', 'ZLIB'),
+ '1.2.840.113549.1.9.16.3.9': ('id-alg-PWRI-KEK', ),
+ '1.2.840.113549.1.9.16.4': ('id-smime-cd', ),
+ '1.2.840.113549.1.9.16.4.1': ('id-smime-cd-ldap', ),
+ '1.2.840.113549.1.9.16.5': ('id-smime-spq', ),
+ '1.2.840.113549.1.9.16.5.1': ('id-smime-spq-ets-sqt-uri', ),
+ '1.2.840.113549.1.9.16.5.2': ('id-smime-spq-ets-sqt-unotice', ),
+ '1.2.840.113549.1.9.16.6': ('id-smime-cti', ),
+ '1.2.840.113549.1.9.16.6.1': ('id-smime-cti-ets-proofOfOrigin', ),
+ '1.2.840.113549.1.9.16.6.2': ('id-smime-cti-ets-proofOfReceipt', ),
+ '1.2.840.113549.1.9.16.6.3': ('id-smime-cti-ets-proofOfDelivery', ),
+ '1.2.840.113549.1.9.16.6.4': ('id-smime-cti-ets-proofOfSender', ),
+ '1.2.840.113549.1.9.16.6.5': ('id-smime-cti-ets-proofOfApproval', ),
+ '1.2.840.113549.1.9.16.6.6': ('id-smime-cti-ets-proofOfCreation', ),
+ '1.2.840.113549.1.9.20': ('friendlyName', ),
+ '1.2.840.113549.1.9.21': ('localKeyID', ),
+ '1.2.840.113549.1.9.22': ('certTypes', ),
+ '1.2.840.113549.1.9.22.1': ('x509Certificate', ),
+ '1.2.840.113549.1.9.22.2': ('sdsiCertificate', ),
+ '1.2.840.113549.1.9.23': ('crlTypes', ),
+ '1.2.840.113549.1.9.23.1': ('x509Crl', ),
+ '1.2.840.113549.1.12': ('pkcs12', ),
+ '1.2.840.113549.1.12.1': ('pkcs12-pbeids', ),
+ '1.2.840.113549.1.12.1.1': ('pbeWithSHA1And128BitRC4', 'PBE-SHA1-RC4-128'),
+ '1.2.840.113549.1.12.1.2': ('pbeWithSHA1And40BitRC4', 'PBE-SHA1-RC4-40'),
+ '1.2.840.113549.1.12.1.3': ('pbeWithSHA1And3-KeyTripleDES-CBC', 'PBE-SHA1-3DES'),
+ '1.2.840.113549.1.12.1.4': ('pbeWithSHA1And2-KeyTripleDES-CBC', 'PBE-SHA1-2DES'),
+ '1.2.840.113549.1.12.1.5': ('pbeWithSHA1And128BitRC2-CBC', 'PBE-SHA1-RC2-128'),
+ '1.2.840.113549.1.12.1.6': ('pbeWithSHA1And40BitRC2-CBC', 'PBE-SHA1-RC2-40'),
+ '1.2.840.113549.1.12.10': ('pkcs12-Version1', ),
+ '1.2.840.113549.1.12.10.1': ('pkcs12-BagIds', ),
+ '1.2.840.113549.1.12.10.1.1': ('keyBag', ),
+ '1.2.840.113549.1.12.10.1.2': ('pkcs8ShroudedKeyBag', ),
+ '1.2.840.113549.1.12.10.1.3': ('certBag', ),
+ '1.2.840.113549.1.12.10.1.4': ('crlBag', ),
+ '1.2.840.113549.1.12.10.1.5': ('secretBag', ),
+ '1.2.840.113549.1.12.10.1.6': ('safeContentsBag', ),
+ '1.2.840.113549.2.2': ('md2', 'MD2'),
+ '1.2.840.113549.2.4': ('md4', 'MD4'),
+ '1.2.840.113549.2.5': ('md5', 'MD5'),
+ '1.2.840.113549.2.6': ('hmacWithMD5', ),
+ '1.2.840.113549.2.7': ('hmacWithSHA1', ),
+ '1.2.840.113549.2.8': ('hmacWithSHA224', ),
+ '1.2.840.113549.2.9': ('hmacWithSHA256', ),
+ '1.2.840.113549.2.10': ('hmacWithSHA384', ),
+ '1.2.840.113549.2.11': ('hmacWithSHA512', ),
+ '1.2.840.113549.2.12': ('hmacWithSHA512-224', ),
+ '1.2.840.113549.2.13': ('hmacWithSHA512-256', ),
+ '1.2.840.113549.3.2': ('rc2-cbc', 'RC2-CBC'),
+ '1.2.840.113549.3.4': ('rc4', 'RC4'),
+ '1.2.840.113549.3.7': ('des-ede3-cbc', 'DES-EDE3-CBC'),
+ '1.2.840.113549.3.8': ('rc5-cbc', 'RC5-CBC'),
+ '1.2.840.113549.3.10': ('des-cdmf', 'DES-CDMF'),
+ '1.3': ('identified-organization', 'org', 'ORG'),
+ '1.3.6': ('dod', 'DOD'),
+ '1.3.6.1': ('iana', 'IANA', 'internet'),
+ '1.3.6.1.1': ('Directory', 'directory'),
+ '1.3.6.1.2': ('Management', 'mgmt'),
+ '1.3.6.1.3': ('Experimental', 'experimental'),
+ '1.3.6.1.4': ('Private', 'private'),
+ '1.3.6.1.4.1': ('Enterprises', 'enterprises'),
+ '1.3.6.1.4.1.188.7.1.1.2': ('idea-cbc', 'IDEA-CBC'),
+ '1.3.6.1.4.1.311.2.1.14': ('Microsoft Extension Request', 'msExtReq'),
+ '1.3.6.1.4.1.311.2.1.21': ('Microsoft Individual Code Signing', 'msCodeInd'),
+ '1.3.6.1.4.1.311.2.1.22': ('Microsoft Commercial Code Signing', 'msCodeCom'),
+ '1.3.6.1.4.1.311.10.3.1': ('Microsoft Trust List Signing', 'msCTLSign'),
+ '1.3.6.1.4.1.311.10.3.3': ('Microsoft Server Gated Crypto', 'msSGC'),
+ '1.3.6.1.4.1.311.10.3.4': ('Microsoft Encrypted File System', 'msEFS'),
+ '1.3.6.1.4.1.311.17.1': ('Microsoft CSP Name', 'CSPName'),
+ '1.3.6.1.4.1.311.17.2': ('Microsoft Local Key set', 'LocalKeySet'),
+ '1.3.6.1.4.1.311.20.2.2': ('Microsoft Smartcardlogin', 'msSmartcardLogin'),
+ '1.3.6.1.4.1.311.20.2.3': ('Microsoft Universal Principal Name', 'msUPN'),
+ '1.3.6.1.4.1.311.60.2.1.1': ('jurisdictionLocalityName', 'jurisdictionL'),
+ '1.3.6.1.4.1.311.60.2.1.2': ('jurisdictionStateOrProvinceName', 'jurisdictionST'),
+ '1.3.6.1.4.1.311.60.2.1.3': ('jurisdictionCountryName', 'jurisdictionC'),
+ '1.3.6.1.4.1.1466.344': ('dcObject', 'dcobject'),
+ '1.3.6.1.4.1.1722.12.2.1.16': ('blake2b512', 'BLAKE2b512'),
+ '1.3.6.1.4.1.1722.12.2.2.8': ('blake2s256', 'BLAKE2s256'),
+ '1.3.6.1.4.1.3029.1.2': ('bf-cbc', 'BF-CBC'),
+ '1.3.6.1.4.1.11129.2.4.2': ('CT Precertificate SCTs', 'ct_precert_scts'),
+ '1.3.6.1.4.1.11129.2.4.3': ('CT Precertificate Poison', 'ct_precert_poison'),
+ '1.3.6.1.4.1.11129.2.4.4': ('CT Precertificate Signer', 'ct_precert_signer'),
+ '1.3.6.1.4.1.11129.2.4.5': ('CT Certificate SCTs', 'ct_cert_scts'),
+ '1.3.6.1.4.1.11591.4.11': ('scrypt', 'id-scrypt'),
+ '1.3.6.1.5': ('Security', 'security'),
+ '1.3.6.1.5.2.3': ('id-pkinit', ),
+ '1.3.6.1.5.2.3.4': ('PKINIT Client Auth', 'pkInitClientAuth'),
+ '1.3.6.1.5.2.3.5': ('Signing KDC Response', 'pkInitKDC'),
+ '1.3.6.1.5.5.7': ('PKIX', ),
+ '1.3.6.1.5.5.7.0': ('id-pkix-mod', ),
+ '1.3.6.1.5.5.7.0.1': ('id-pkix1-explicit-88', ),
+ '1.3.6.1.5.5.7.0.2': ('id-pkix1-implicit-88', ),
+ '1.3.6.1.5.5.7.0.3': ('id-pkix1-explicit-93', ),
+ '1.3.6.1.5.5.7.0.4': ('id-pkix1-implicit-93', ),
+ '1.3.6.1.5.5.7.0.5': ('id-mod-crmf', ),
+ '1.3.6.1.5.5.7.0.6': ('id-mod-cmc', ),
+ '1.3.6.1.5.5.7.0.7': ('id-mod-kea-profile-88', ),
+ '1.3.6.1.5.5.7.0.8': ('id-mod-kea-profile-93', ),
+ '1.3.6.1.5.5.7.0.9': ('id-mod-cmp', ),
+ '1.3.6.1.5.5.7.0.10': ('id-mod-qualified-cert-88', ),
+ '1.3.6.1.5.5.7.0.11': ('id-mod-qualified-cert-93', ),
+ '1.3.6.1.5.5.7.0.12': ('id-mod-attribute-cert', ),
+ '1.3.6.1.5.5.7.0.13': ('id-mod-timestamp-protocol', ),
+ '1.3.6.1.5.5.7.0.14': ('id-mod-ocsp', ),
+ '1.3.6.1.5.5.7.0.15': ('id-mod-dvcs', ),
+ '1.3.6.1.5.5.7.0.16': ('id-mod-cmp2000', ),
+ '1.3.6.1.5.5.7.1': ('id-pe', ),
+ '1.3.6.1.5.5.7.1.1': ('Authority Information Access', 'authorityInfoAccess'),
+ '1.3.6.1.5.5.7.1.2': ('Biometric Info', 'biometricInfo'),
+ '1.3.6.1.5.5.7.1.3': ('qcStatements', ),
+ '1.3.6.1.5.5.7.1.4': ('ac-auditEntity', ),
+ '1.3.6.1.5.5.7.1.5': ('ac-targeting', ),
+ '1.3.6.1.5.5.7.1.6': ('aaControls', ),
+ '1.3.6.1.5.5.7.1.7': ('sbgp-ipAddrBlock', ),
+ '1.3.6.1.5.5.7.1.8': ('sbgp-autonomousSysNum', ),
+ '1.3.6.1.5.5.7.1.9': ('sbgp-routerIdentifier', ),
+ '1.3.6.1.5.5.7.1.10': ('ac-proxying', ),
+ '1.3.6.1.5.5.7.1.11': ('Subject Information Access', 'subjectInfoAccess'),
+ '1.3.6.1.5.5.7.1.14': ('Proxy Certificate Information', 'proxyCertInfo'),
+ '1.3.6.1.5.5.7.1.24': ('TLS Feature', 'tlsfeature'),
+ '1.3.6.1.5.5.7.2': ('id-qt', ),
+ '1.3.6.1.5.5.7.2.1': ('Policy Qualifier CPS', 'id-qt-cps'),
+ '1.3.6.1.5.5.7.2.2': ('Policy Qualifier User Notice', 'id-qt-unotice'),
+ '1.3.6.1.5.5.7.2.3': ('textNotice', ),
+ '1.3.6.1.5.5.7.3': ('id-kp', ),
+ '1.3.6.1.5.5.7.3.1': ('TLS Web Server Authentication', 'serverAuth'),
+ '1.3.6.1.5.5.7.3.2': ('TLS Web Client Authentication', 'clientAuth'),
+ '1.3.6.1.5.5.7.3.3': ('Code Signing', 'codeSigning'),
+ '1.3.6.1.5.5.7.3.4': ('E-mail Protection', 'emailProtection'),
+ '1.3.6.1.5.5.7.3.5': ('IPSec End System', 'ipsecEndSystem'),
+ '1.3.6.1.5.5.7.3.6': ('IPSec Tunnel', 'ipsecTunnel'),
+ '1.3.6.1.5.5.7.3.7': ('IPSec User', 'ipsecUser'),
+ '1.3.6.1.5.5.7.3.8': ('Time Stamping', 'timeStamping'),
+ '1.3.6.1.5.5.7.3.9': ('OCSP Signing', 'OCSPSigning'),
+ '1.3.6.1.5.5.7.3.10': ('dvcs', 'DVCS'),
+ '1.3.6.1.5.5.7.3.17': ('ipsec Internet Key Exchange', 'ipsecIKE'),
+ '1.3.6.1.5.5.7.3.18': ('Ctrl/provision WAP Access', 'capwapAC'),
+ '1.3.6.1.5.5.7.3.19': ('Ctrl/Provision WAP Termination', 'capwapWTP'),
+ '1.3.6.1.5.5.7.3.21': ('SSH Client', 'secureShellClient'),
+ '1.3.6.1.5.5.7.3.22': ('SSH Server', 'secureShellServer'),
+ '1.3.6.1.5.5.7.3.23': ('Send Router', 'sendRouter'),
+ '1.3.6.1.5.5.7.3.24': ('Send Proxied Router', 'sendProxiedRouter'),
+ '1.3.6.1.5.5.7.3.25': ('Send Owner', 'sendOwner'),
+ '1.3.6.1.5.5.7.3.26': ('Send Proxied Owner', 'sendProxiedOwner'),
+ '1.3.6.1.5.5.7.3.27': ('CMC Certificate Authority', 'cmcCA'),
+ '1.3.6.1.5.5.7.3.28': ('CMC Registration Authority', 'cmcRA'),
+ '1.3.6.1.5.5.7.4': ('id-it', ),
+ '1.3.6.1.5.5.7.4.1': ('id-it-caProtEncCert', ),
+ '1.3.6.1.5.5.7.4.2': ('id-it-signKeyPairTypes', ),
+ '1.3.6.1.5.5.7.4.3': ('id-it-encKeyPairTypes', ),
+ '1.3.6.1.5.5.7.4.4': ('id-it-preferredSymmAlg', ),
+ '1.3.6.1.5.5.7.4.5': ('id-it-caKeyUpdateInfo', ),
+ '1.3.6.1.5.5.7.4.6': ('id-it-currentCRL', ),
+ '1.3.6.1.5.5.7.4.7': ('id-it-unsupportedOIDs', ),
+ '1.3.6.1.5.5.7.4.8': ('id-it-subscriptionRequest', ),
+ '1.3.6.1.5.5.7.4.9': ('id-it-subscriptionResponse', ),
+ '1.3.6.1.5.5.7.4.10': ('id-it-keyPairParamReq', ),
+ '1.3.6.1.5.5.7.4.11': ('id-it-keyPairParamRep', ),
+ '1.3.6.1.5.5.7.4.12': ('id-it-revPassphrase', ),
+ '1.3.6.1.5.5.7.4.13': ('id-it-implicitConfirm', ),
+ '1.3.6.1.5.5.7.4.14': ('id-it-confirmWaitTime', ),
+ '1.3.6.1.5.5.7.4.15': ('id-it-origPKIMessage', ),
+ '1.3.6.1.5.5.7.4.16': ('id-it-suppLangTags', ),
+ '1.3.6.1.5.5.7.5': ('id-pkip', ),
+ '1.3.6.1.5.5.7.5.1': ('id-regCtrl', ),
+ '1.3.6.1.5.5.7.5.1.1': ('id-regCtrl-regToken', ),
+ '1.3.6.1.5.5.7.5.1.2': ('id-regCtrl-authenticator', ),
+ '1.3.6.1.5.5.7.5.1.3': ('id-regCtrl-pkiPublicationInfo', ),
+ '1.3.6.1.5.5.7.5.1.4': ('id-regCtrl-pkiArchiveOptions', ),
+ '1.3.6.1.5.5.7.5.1.5': ('id-regCtrl-oldCertID', ),
+ '1.3.6.1.5.5.7.5.1.6': ('id-regCtrl-protocolEncrKey', ),
+ '1.3.6.1.5.5.7.5.2': ('id-regInfo', ),
+ '1.3.6.1.5.5.7.5.2.1': ('id-regInfo-utf8Pairs', ),
+ '1.3.6.1.5.5.7.5.2.2': ('id-regInfo-certReq', ),
+ '1.3.6.1.5.5.7.6': ('id-alg', ),
+ '1.3.6.1.5.5.7.6.1': ('id-alg-des40', ),
+ '1.3.6.1.5.5.7.6.2': ('id-alg-noSignature', ),
+ '1.3.6.1.5.5.7.6.3': ('id-alg-dh-sig-hmac-sha1', ),
+ '1.3.6.1.5.5.7.6.4': ('id-alg-dh-pop', ),
+ '1.3.6.1.5.5.7.7': ('id-cmc', ),
+ '1.3.6.1.5.5.7.7.1': ('id-cmc-statusInfo', ),
+ '1.3.6.1.5.5.7.7.2': ('id-cmc-identification', ),
+ '1.3.6.1.5.5.7.7.3': ('id-cmc-identityProof', ),
+ '1.3.6.1.5.5.7.7.4': ('id-cmc-dataReturn', ),
+ '1.3.6.1.5.5.7.7.5': ('id-cmc-transactionId', ),
+ '1.3.6.1.5.5.7.7.6': ('id-cmc-senderNonce', ),
+ '1.3.6.1.5.5.7.7.7': ('id-cmc-recipientNonce', ),
+ '1.3.6.1.5.5.7.7.8': ('id-cmc-addExtensions', ),
+ '1.3.6.1.5.5.7.7.9': ('id-cmc-encryptedPOP', ),
+ '1.3.6.1.5.5.7.7.10': ('id-cmc-decryptedPOP', ),
+ '1.3.6.1.5.5.7.7.11': ('id-cmc-lraPOPWitness', ),
+ '1.3.6.1.5.5.7.7.15': ('id-cmc-getCert', ),
+ '1.3.6.1.5.5.7.7.16': ('id-cmc-getCRL', ),
+ '1.3.6.1.5.5.7.7.17': ('id-cmc-revokeRequest', ),
+ '1.3.6.1.5.5.7.7.18': ('id-cmc-regInfo', ),
+ '1.3.6.1.5.5.7.7.19': ('id-cmc-responseInfo', ),
+ '1.3.6.1.5.5.7.7.21': ('id-cmc-queryPending', ),
+ '1.3.6.1.5.5.7.7.22': ('id-cmc-popLinkRandom', ),
+ '1.3.6.1.5.5.7.7.23': ('id-cmc-popLinkWitness', ),
+ '1.3.6.1.5.5.7.7.24': ('id-cmc-confirmCertAcceptance', ),
+ '1.3.6.1.5.5.7.8': ('id-on', ),
+ '1.3.6.1.5.5.7.8.1': ('id-on-personalData', ),
+ '1.3.6.1.5.5.7.8.3': ('Permanent Identifier', 'id-on-permanentIdentifier'),
+ '1.3.6.1.5.5.7.9': ('id-pda', ),
+ '1.3.6.1.5.5.7.9.1': ('id-pda-dateOfBirth', ),
+ '1.3.6.1.5.5.7.9.2': ('id-pda-placeOfBirth', ),
+ '1.3.6.1.5.5.7.9.3': ('id-pda-gender', ),
+ '1.3.6.1.5.5.7.9.4': ('id-pda-countryOfCitizenship', ),
+ '1.3.6.1.5.5.7.9.5': ('id-pda-countryOfResidence', ),
+ '1.3.6.1.5.5.7.10': ('id-aca', ),
+ '1.3.6.1.5.5.7.10.1': ('id-aca-authenticationInfo', ),
+ '1.3.6.1.5.5.7.10.2': ('id-aca-accessIdentity', ),
+ '1.3.6.1.5.5.7.10.3': ('id-aca-chargingIdentity', ),
+ '1.3.6.1.5.5.7.10.4': ('id-aca-group', ),
+ '1.3.6.1.5.5.7.10.5': ('id-aca-role', ),
+ '1.3.6.1.5.5.7.10.6': ('id-aca-encAttrs', ),
+ '1.3.6.1.5.5.7.11': ('id-qcs', ),
+ '1.3.6.1.5.5.7.11.1': ('id-qcs-pkixQCSyntax-v1', ),
+ '1.3.6.1.5.5.7.12': ('id-cct', ),
+ '1.3.6.1.5.5.7.12.1': ('id-cct-crs', ),
+ '1.3.6.1.5.5.7.12.2': ('id-cct-PKIData', ),
+ '1.3.6.1.5.5.7.12.3': ('id-cct-PKIResponse', ),
+ '1.3.6.1.5.5.7.21': ('id-ppl', ),
+ '1.3.6.1.5.5.7.21.0': ('Any language', 'id-ppl-anyLanguage'),
+ '1.3.6.1.5.5.7.21.1': ('Inherit all', 'id-ppl-inheritAll'),
+ '1.3.6.1.5.5.7.21.2': ('Independent', 'id-ppl-independent'),
+ '1.3.6.1.5.5.7.48': ('id-ad', ),
+ '1.3.6.1.5.5.7.48.1': ('OCSP', 'OCSP', 'id-pkix-OCSP'),
+ '1.3.6.1.5.5.7.48.1.1': ('Basic OCSP Response', 'basicOCSPResponse'),
+ '1.3.6.1.5.5.7.48.1.2': ('OCSP Nonce', 'Nonce'),
+ '1.3.6.1.5.5.7.48.1.3': ('OCSP CRL ID', 'CrlID'),
+ '1.3.6.1.5.5.7.48.1.4': ('Acceptable OCSP Responses', 'acceptableResponses'),
+ '1.3.6.1.5.5.7.48.1.5': ('OCSP No Check', 'noCheck'),
+ '1.3.6.1.5.5.7.48.1.6': ('OCSP Archive Cutoff', 'archiveCutoff'),
+ '1.3.6.1.5.5.7.48.1.7': ('OCSP Service Locator', 'serviceLocator'),
+ '1.3.6.1.5.5.7.48.1.8': ('Extended OCSP Status', 'extendedStatus'),
+ '1.3.6.1.5.5.7.48.1.9': ('valid', ),
+ '1.3.6.1.5.5.7.48.1.10': ('path', ),
+ '1.3.6.1.5.5.7.48.1.11': ('Trust Root', 'trustRoot'),
+ '1.3.6.1.5.5.7.48.2': ('CA Issuers', 'caIssuers'),
+ '1.3.6.1.5.5.7.48.3': ('AD Time Stamping', 'ad_timestamping'),
+ '1.3.6.1.5.5.7.48.4': ('ad dvcs', 'AD_DVCS'),
+ '1.3.6.1.5.5.7.48.5': ('CA Repository', 'caRepository'),
+ '1.3.6.1.5.5.8.1.1': ('hmac-md5', 'HMAC-MD5'),
+ '1.3.6.1.5.5.8.1.2': ('hmac-sha1', 'HMAC-SHA1'),
+ '1.3.6.1.6': ('SNMPv2', 'snmpv2'),
+ '1.3.6.1.7': ('Mail', ),
+ '1.3.6.1.7.1': ('MIME MHS', 'mime-mhs'),
+ '1.3.6.1.7.1.1': ('mime-mhs-headings', 'mime-mhs-headings'),
+ '1.3.6.1.7.1.1.1': ('id-hex-partial-message', 'id-hex-partial-message'),
+ '1.3.6.1.7.1.1.2': ('id-hex-multipart-message', 'id-hex-multipart-message'),
+ '1.3.6.1.7.1.2': ('mime-mhs-bodies', 'mime-mhs-bodies'),
+ '1.3.14.3.2': ('algorithm', 'algorithm'),
+ '1.3.14.3.2.3': ('md5WithRSA', 'RSA-NP-MD5'),
+ '1.3.14.3.2.6': ('des-ecb', 'DES-ECB'),
+ '1.3.14.3.2.7': ('des-cbc', 'DES-CBC'),
+ '1.3.14.3.2.8': ('des-ofb', 'DES-OFB'),
+ '1.3.14.3.2.9': ('des-cfb', 'DES-CFB'),
+ '1.3.14.3.2.11': ('rsaSignature', ),
+ '1.3.14.3.2.12': ('dsaEncryption-old', 'DSA-old'),
+ '1.3.14.3.2.13': ('dsaWithSHA', 'DSA-SHA'),
+ '1.3.14.3.2.15': ('shaWithRSAEncryption', 'RSA-SHA'),
+ '1.3.14.3.2.17': ('des-ede', 'DES-EDE'),
+ '1.3.14.3.2.18': ('sha', 'SHA'),
+ '1.3.14.3.2.26': ('sha1', 'SHA1'),
+ '1.3.14.3.2.27': ('dsaWithSHA1-old', 'DSA-SHA1-old'),
+ '1.3.14.3.2.29': ('sha1WithRSA', 'RSA-SHA1-2'),
+ '1.3.36.3.2.1': ('ripemd160', 'RIPEMD160'),
+ '1.3.36.3.3.1.2': ('ripemd160WithRSA', 'RSA-RIPEMD160'),
+ '1.3.36.3.3.2.8.1.1.1': ('brainpoolP160r1', ),
+ '1.3.36.3.3.2.8.1.1.2': ('brainpoolP160t1', ),
+ '1.3.36.3.3.2.8.1.1.3': ('brainpoolP192r1', ),
+ '1.3.36.3.3.2.8.1.1.4': ('brainpoolP192t1', ),
+ '1.3.36.3.3.2.8.1.1.5': ('brainpoolP224r1', ),
+ '1.3.36.3.3.2.8.1.1.6': ('brainpoolP224t1', ),
+ '1.3.36.3.3.2.8.1.1.7': ('brainpoolP256r1', ),
+ '1.3.36.3.3.2.8.1.1.8': ('brainpoolP256t1', ),
+ '1.3.36.3.3.2.8.1.1.9': ('brainpoolP320r1', ),
+ '1.3.36.3.3.2.8.1.1.10': ('brainpoolP320t1', ),
+ '1.3.36.3.3.2.8.1.1.11': ('brainpoolP384r1', ),
+ '1.3.36.3.3.2.8.1.1.12': ('brainpoolP384t1', ),
+ '1.3.36.3.3.2.8.1.1.13': ('brainpoolP512r1', ),
+ '1.3.36.3.3.2.8.1.1.14': ('brainpoolP512t1', ),
+ '1.3.36.8.3.3': ('Professional Information or basis for Admission', 'x509ExtAdmission'),
+ '1.3.101.1.4.1': ('Strong Extranet ID', 'SXNetID'),
+ '1.3.101.110': ('X25519', ),
+ '1.3.101.111': ('X448', ),
+ '1.3.101.112': ('ED25519', ),
+ '1.3.101.113': ('ED448', ),
+ '1.3.111': ('ieee', ),
+ '1.3.111.2.1619': ('IEEE Security in Storage Working Group', 'ieee-siswg'),
+ '1.3.111.2.1619.0.1.1': ('aes-128-xts', 'AES-128-XTS'),
+ '1.3.111.2.1619.0.1.2': ('aes-256-xts', 'AES-256-XTS'),
+ '1.3.132': ('certicom-arc', ),
+ '1.3.132.0': ('secg_ellipticCurve', ),
+ '1.3.132.0.1': ('sect163k1', ),
+ '1.3.132.0.2': ('sect163r1', ),
+ '1.3.132.0.3': ('sect239k1', ),
+ '1.3.132.0.4': ('sect113r1', ),
+ '1.3.132.0.5': ('sect113r2', ),
+ '1.3.132.0.6': ('secp112r1', ),
+ '1.3.132.0.7': ('secp112r2', ),
+ '1.3.132.0.8': ('secp160r1', ),
+ '1.3.132.0.9': ('secp160k1', ),
+ '1.3.132.0.10': ('secp256k1', ),
+ '1.3.132.0.15': ('sect163r2', ),
+ '1.3.132.0.16': ('sect283k1', ),
+ '1.3.132.0.17': ('sect283r1', ),
+ '1.3.132.0.22': ('sect131r1', ),
+ '1.3.132.0.23': ('sect131r2', ),
+ '1.3.132.0.24': ('sect193r1', ),
+ '1.3.132.0.25': ('sect193r2', ),
+ '1.3.132.0.26': ('sect233k1', ),
+ '1.3.132.0.27': ('sect233r1', ),
+ '1.3.132.0.28': ('secp128r1', ),
+ '1.3.132.0.29': ('secp128r2', ),
+ '1.3.132.0.30': ('secp160r2', ),
+ '1.3.132.0.31': ('secp192k1', ),
+ '1.3.132.0.32': ('secp224k1', ),
+ '1.3.132.0.33': ('secp224r1', ),
+ '1.3.132.0.34': ('secp384r1', ),
+ '1.3.132.0.35': ('secp521r1', ),
+ '1.3.132.0.36': ('sect409k1', ),
+ '1.3.132.0.37': ('sect409r1', ),
+ '1.3.132.0.38': ('sect571k1', ),
+ '1.3.132.0.39': ('sect571r1', ),
+ '1.3.132.1': ('secg-scheme', ),
+ '1.3.132.1.11.0': ('dhSinglePass-stdDH-sha224kdf-scheme', ),
+ '1.3.132.1.11.1': ('dhSinglePass-stdDH-sha256kdf-scheme', ),
+ '1.3.132.1.11.2': ('dhSinglePass-stdDH-sha384kdf-scheme', ),
+ '1.3.132.1.11.3': ('dhSinglePass-stdDH-sha512kdf-scheme', ),
+ '1.3.132.1.14.0': ('dhSinglePass-cofactorDH-sha224kdf-scheme', ),
+ '1.3.132.1.14.1': ('dhSinglePass-cofactorDH-sha256kdf-scheme', ),
+ '1.3.132.1.14.2': ('dhSinglePass-cofactorDH-sha384kdf-scheme', ),
+ '1.3.132.1.14.3': ('dhSinglePass-cofactorDH-sha512kdf-scheme', ),
+ '1.3.133.16.840.63.0': ('x9-63-scheme', ),
+ '1.3.133.16.840.63.0.2': ('dhSinglePass-stdDH-sha1kdf-scheme', ),
+ '1.3.133.16.840.63.0.3': ('dhSinglePass-cofactorDH-sha1kdf-scheme', ),
+ '2': ('joint-iso-itu-t', 'JOINT-ISO-ITU-T', 'joint-iso-ccitt'),
+ '2.5': ('directory services (X.500)', 'X500'),
+ '2.5.1.5': ('Selected Attribute Types', 'selected-attribute-types'),
+ '2.5.1.5.55': ('clearance', ),
+ '2.5.4': ('X509', ),
+ '2.5.4.3': ('commonName', 'CN'),
+ '2.5.4.4': ('surname', 'SN'),
+ '2.5.4.5': ('serialNumber', ),
+ '2.5.4.6': ('countryName', 'C'),
+ '2.5.4.7': ('localityName', 'L'),
+ '2.5.4.8': ('stateOrProvinceName', 'ST'),
+ '2.5.4.9': ('streetAddress', 'street'),
+ '2.5.4.10': ('organizationName', 'O'),
+ '2.5.4.11': ('organizationalUnitName', 'OU'),
+ '2.5.4.12': ('title', 'title'),
+ '2.5.4.13': ('description', ),
+ '2.5.4.14': ('searchGuide', ),
+ '2.5.4.15': ('businessCategory', ),
+ '2.5.4.16': ('postalAddress', ),
+ '2.5.4.17': ('postalCode', ),
+ '2.5.4.18': ('postOfficeBox', ),
+ '2.5.4.19': ('physicalDeliveryOfficeName', ),
+ '2.5.4.20': ('telephoneNumber', ),
+ '2.5.4.21': ('telexNumber', ),
+ '2.5.4.22': ('teletexTerminalIdentifier', ),
+ '2.5.4.23': ('facsimileTelephoneNumber', ),
+ '2.5.4.24': ('x121Address', ),
+ '2.5.4.25': ('internationaliSDNNumber', ),
+ '2.5.4.26': ('registeredAddress', ),
+ '2.5.4.27': ('destinationIndicator', ),
+ '2.5.4.28': ('preferredDeliveryMethod', ),
+ '2.5.4.29': ('presentationAddress', ),
+ '2.5.4.30': ('supportedApplicationContext', ),
+ '2.5.4.31': ('member', ),
+ '2.5.4.32': ('owner', ),
+ '2.5.4.33': ('roleOccupant', ),
+ '2.5.4.34': ('seeAlso', ),
+ '2.5.4.35': ('userPassword', ),
+ '2.5.4.36': ('userCertificate', ),
+ '2.5.4.37': ('cACertificate', ),
+ '2.5.4.38': ('authorityRevocationList', ),
+ '2.5.4.39': ('certificateRevocationList', ),
+ '2.5.4.40': ('crossCertificatePair', ),
+ '2.5.4.41': ('name', 'name'),
+ '2.5.4.42': ('givenName', 'GN'),
+ '2.5.4.43': ('initials', 'initials'),
+ '2.5.4.44': ('generationQualifier', ),
+ '2.5.4.45': ('x500UniqueIdentifier', ),
+ '2.5.4.46': ('dnQualifier', 'dnQualifier'),
+ '2.5.4.47': ('enhancedSearchGuide', ),
+ '2.5.4.48': ('protocolInformation', ),
+ '2.5.4.49': ('distinguishedName', ),
+ '2.5.4.50': ('uniqueMember', ),
+ '2.5.4.51': ('houseIdentifier', ),
+ '2.5.4.52': ('supportedAlgorithms', ),
+ '2.5.4.53': ('deltaRevocationList', ),
+ '2.5.4.54': ('dmdName', ),
+ '2.5.4.65': ('pseudonym', ),
+ '2.5.4.72': ('role', 'role'),
+ '2.5.4.97': ('organizationIdentifier', ),
+ '2.5.4.98': ('countryCode3c', 'c3'),
+ '2.5.4.99': ('countryCode3n', 'n3'),
+ '2.5.4.100': ('dnsName', ),
+ '2.5.8': ('directory services - algorithms', 'X500algorithms'),
+ '2.5.8.1.1': ('rsa', 'RSA'),
+ '2.5.8.3.100': ('mdc2WithRSA', 'RSA-MDC2'),
+ '2.5.8.3.101': ('mdc2', 'MDC2'),
+ '2.5.29': ('id-ce', ),
+ '2.5.29.9': ('X509v3 Subject Directory Attributes', 'subjectDirectoryAttributes'),
+ '2.5.29.14': ('X509v3 Subject Key Identifier', 'subjectKeyIdentifier'),
+ '2.5.29.15': ('X509v3 Key Usage', 'keyUsage'),
+ '2.5.29.16': ('X509v3 Private Key Usage Period', 'privateKeyUsagePeriod'),
+ '2.5.29.17': ('X509v3 Subject Alternative Name', 'subjectAltName'),
+ '2.5.29.18': ('X509v3 Issuer Alternative Name', 'issuerAltName'),
+ '2.5.29.19': ('X509v3 Basic Constraints', 'basicConstraints'),
+ '2.5.29.20': ('X509v3 CRL Number', 'crlNumber'),
+ '2.5.29.21': ('X509v3 CRL Reason Code', 'CRLReason'),
+ '2.5.29.23': ('Hold Instruction Code', 'holdInstructionCode'),
+ '2.5.29.24': ('Invalidity Date', 'invalidityDate'),
+ '2.5.29.27': ('X509v3 Delta CRL Indicator', 'deltaCRL'),
+ '2.5.29.28': ('X509v3 Issuing Distribution Point', 'issuingDistributionPoint'),
+ '2.5.29.29': ('X509v3 Certificate Issuer', 'certificateIssuer'),
+ '2.5.29.30': ('X509v3 Name Constraints', 'nameConstraints'),
+ '2.5.29.31': ('X509v3 CRL Distribution Points', 'crlDistributionPoints'),
+ '2.5.29.32': ('X509v3 Certificate Policies', 'certificatePolicies'),
+ '2.5.29.32.0': ('X509v3 Any Policy', 'anyPolicy'),
+ '2.5.29.33': ('X509v3 Policy Mappings', 'policyMappings'),
+ '2.5.29.35': ('X509v3 Authority Key Identifier', 'authorityKeyIdentifier'),
+ '2.5.29.36': ('X509v3 Policy Constraints', 'policyConstraints'),
+ '2.5.29.37': ('X509v3 Extended Key Usage', 'extendedKeyUsage'),
+ '2.5.29.37.0': ('Any Extended Key Usage', 'anyExtendedKeyUsage'),
+ '2.5.29.46': ('X509v3 Freshest CRL', 'freshestCRL'),
+ '2.5.29.54': ('X509v3 Inhibit Any Policy', 'inhibitAnyPolicy'),
+ '2.5.29.55': ('X509v3 AC Targeting', 'targetInformation'),
+ '2.5.29.56': ('X509v3 No Revocation Available', 'noRevAvail'),
+ '2.16.840.1.101.3': ('csor', ),
+ '2.16.840.1.101.3.4': ('nistAlgorithms', ),
+ '2.16.840.1.101.3.4.1': ('aes', ),
+ '2.16.840.1.101.3.4.1.1': ('aes-128-ecb', 'AES-128-ECB'),
+ '2.16.840.1.101.3.4.1.2': ('aes-128-cbc', 'AES-128-CBC'),
+ '2.16.840.1.101.3.4.1.3': ('aes-128-ofb', 'AES-128-OFB'),
+ '2.16.840.1.101.3.4.1.4': ('aes-128-cfb', 'AES-128-CFB'),
+ '2.16.840.1.101.3.4.1.5': ('id-aes128-wrap', ),
+ '2.16.840.1.101.3.4.1.6': ('aes-128-gcm', 'id-aes128-GCM'),
+ '2.16.840.1.101.3.4.1.7': ('aes-128-ccm', 'id-aes128-CCM'),
+ '2.16.840.1.101.3.4.1.8': ('id-aes128-wrap-pad', ),
+ '2.16.840.1.101.3.4.1.21': ('aes-192-ecb', 'AES-192-ECB'),
+ '2.16.840.1.101.3.4.1.22': ('aes-192-cbc', 'AES-192-CBC'),
+ '2.16.840.1.101.3.4.1.23': ('aes-192-ofb', 'AES-192-OFB'),
+ '2.16.840.1.101.3.4.1.24': ('aes-192-cfb', 'AES-192-CFB'),
+ '2.16.840.1.101.3.4.1.25': ('id-aes192-wrap', ),
+ '2.16.840.1.101.3.4.1.26': ('aes-192-gcm', 'id-aes192-GCM'),
+ '2.16.840.1.101.3.4.1.27': ('aes-192-ccm', 'id-aes192-CCM'),
+ '2.16.840.1.101.3.4.1.28': ('id-aes192-wrap-pad', ),
+ '2.16.840.1.101.3.4.1.41': ('aes-256-ecb', 'AES-256-ECB'),
+ '2.16.840.1.101.3.4.1.42': ('aes-256-cbc', 'AES-256-CBC'),
+ '2.16.840.1.101.3.4.1.43': ('aes-256-ofb', 'AES-256-OFB'),
+ '2.16.840.1.101.3.4.1.44': ('aes-256-cfb', 'AES-256-CFB'),
+ '2.16.840.1.101.3.4.1.45': ('id-aes256-wrap', ),
+ '2.16.840.1.101.3.4.1.46': ('aes-256-gcm', 'id-aes256-GCM'),
+ '2.16.840.1.101.3.4.1.47': ('aes-256-ccm', 'id-aes256-CCM'),
+ '2.16.840.1.101.3.4.1.48': ('id-aes256-wrap-pad', ),
+ '2.16.840.1.101.3.4.2': ('nist_hashalgs', ),
+ '2.16.840.1.101.3.4.2.1': ('sha256', 'SHA256'),
+ '2.16.840.1.101.3.4.2.2': ('sha384', 'SHA384'),
+ '2.16.840.1.101.3.4.2.3': ('sha512', 'SHA512'),
+ '2.16.840.1.101.3.4.2.4': ('sha224', 'SHA224'),
+ '2.16.840.1.101.3.4.2.5': ('sha512-224', 'SHA512-224'),
+ '2.16.840.1.101.3.4.2.6': ('sha512-256', 'SHA512-256'),
+ '2.16.840.1.101.3.4.2.7': ('sha3-224', 'SHA3-224'),
+ '2.16.840.1.101.3.4.2.8': ('sha3-256', 'SHA3-256'),
+ '2.16.840.1.101.3.4.2.9': ('sha3-384', 'SHA3-384'),
+ '2.16.840.1.101.3.4.2.10': ('sha3-512', 'SHA3-512'),
+ '2.16.840.1.101.3.4.2.11': ('shake128', 'SHAKE128'),
+ '2.16.840.1.101.3.4.2.12': ('shake256', 'SHAKE256'),
+ '2.16.840.1.101.3.4.2.13': ('hmac-sha3-224', 'id-hmacWithSHA3-224'),
+ '2.16.840.1.101.3.4.2.14': ('hmac-sha3-256', 'id-hmacWithSHA3-256'),
+ '2.16.840.1.101.3.4.2.15': ('hmac-sha3-384', 'id-hmacWithSHA3-384'),
+ '2.16.840.1.101.3.4.2.16': ('hmac-sha3-512', 'id-hmacWithSHA3-512'),
+ '2.16.840.1.101.3.4.3': ('dsa_with_sha2', 'sigAlgs'),
+ '2.16.840.1.101.3.4.3.1': ('dsa_with_SHA224', ),
+ '2.16.840.1.101.3.4.3.2': ('dsa_with_SHA256', ),
+ '2.16.840.1.101.3.4.3.3': ('dsa_with_SHA384', 'id-dsa-with-sha384'),
+ '2.16.840.1.101.3.4.3.4': ('dsa_with_SHA512', 'id-dsa-with-sha512'),
+ '2.16.840.1.101.3.4.3.5': ('dsa_with_SHA3-224', 'id-dsa-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.6': ('dsa_with_SHA3-256', 'id-dsa-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.7': ('dsa_with_SHA3-384', 'id-dsa-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.8': ('dsa_with_SHA3-512', 'id-dsa-with-sha3-512'),
+ '2.16.840.1.101.3.4.3.9': ('ecdsa_with_SHA3-224', 'id-ecdsa-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.10': ('ecdsa_with_SHA3-256', 'id-ecdsa-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.11': ('ecdsa_with_SHA3-384', 'id-ecdsa-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.12': ('ecdsa_with_SHA3-512', 'id-ecdsa-with-sha3-512'),
+ '2.16.840.1.101.3.4.3.13': ('RSA-SHA3-224', 'id-rsassa-pkcs1-v1_5-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.14': ('RSA-SHA3-256', 'id-rsassa-pkcs1-v1_5-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.15': ('RSA-SHA3-384', 'id-rsassa-pkcs1-v1_5-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.16': ('RSA-SHA3-512', 'id-rsassa-pkcs1-v1_5-with-sha3-512'),
+ '2.16.840.1.113730': ('Netscape Communications Corp.', 'Netscape'),
+ '2.16.840.1.113730.1': ('Netscape Certificate Extension', 'nsCertExt'),
+ '2.16.840.1.113730.1.1': ('Netscape Cert Type', 'nsCertType'),
+ '2.16.840.1.113730.1.2': ('Netscape Base Url', 'nsBaseUrl'),
+ '2.16.840.1.113730.1.3': ('Netscape Revocation Url', 'nsRevocationUrl'),
+ '2.16.840.1.113730.1.4': ('Netscape CA Revocation Url', 'nsCaRevocationUrl'),
+ '2.16.840.1.113730.1.7': ('Netscape Renewal Url', 'nsRenewalUrl'),
+ '2.16.840.1.113730.1.8': ('Netscape CA Policy Url', 'nsCaPolicyUrl'),
+ '2.16.840.1.113730.1.12': ('Netscape SSL Server Name', 'nsSslServerName'),
+ '2.16.840.1.113730.1.13': ('Netscape Comment', 'nsComment'),
+ '2.16.840.1.113730.2': ('Netscape Data Type', 'nsDataType'),
+ '2.16.840.1.113730.2.5': ('Netscape Certificate Sequence', 'nsCertSequence'),
+ '2.16.840.1.113730.4.1': ('Netscape Server Gated Crypto', 'nsSGC'),
+ '2.23': ('International Organizations', 'international-organizations'),
+ '2.23.42': ('Secure Electronic Transactions', 'id-set'),
+ '2.23.42.0': ('content types', 'set-ctype'),
+ '2.23.42.0.0': ('setct-PANData', ),
+ '2.23.42.0.1': ('setct-PANToken', ),
+ '2.23.42.0.2': ('setct-PANOnly', ),
+ '2.23.42.0.3': ('setct-OIData', ),
+ '2.23.42.0.4': ('setct-PI', ),
+ '2.23.42.0.5': ('setct-PIData', ),
+ '2.23.42.0.6': ('setct-PIDataUnsigned', ),
+ '2.23.42.0.7': ('setct-HODInput', ),
+ '2.23.42.0.8': ('setct-AuthResBaggage', ),
+ '2.23.42.0.9': ('setct-AuthRevReqBaggage', ),
+ '2.23.42.0.10': ('setct-AuthRevResBaggage', ),
+ '2.23.42.0.11': ('setct-CapTokenSeq', ),
+ '2.23.42.0.12': ('setct-PInitResData', ),
+ '2.23.42.0.13': ('setct-PI-TBS', ),
+ '2.23.42.0.14': ('setct-PResData', ),
+ '2.23.42.0.16': ('setct-AuthReqTBS', ),
+ '2.23.42.0.17': ('setct-AuthResTBS', ),
+ '2.23.42.0.18': ('setct-AuthResTBSX', ),
+ '2.23.42.0.19': ('setct-AuthTokenTBS', ),
+ '2.23.42.0.20': ('setct-CapTokenData', ),
+ '2.23.42.0.21': ('setct-CapTokenTBS', ),
+ '2.23.42.0.22': ('setct-AcqCardCodeMsg', ),
+ '2.23.42.0.23': ('setct-AuthRevReqTBS', ),
+ '2.23.42.0.24': ('setct-AuthRevResData', ),
+ '2.23.42.0.25': ('setct-AuthRevResTBS', ),
+ '2.23.42.0.26': ('setct-CapReqTBS', ),
+ '2.23.42.0.27': ('setct-CapReqTBSX', ),
+ '2.23.42.0.28': ('setct-CapResData', ),
+ '2.23.42.0.29': ('setct-CapRevReqTBS', ),
+ '2.23.42.0.30': ('setct-CapRevReqTBSX', ),
+ '2.23.42.0.31': ('setct-CapRevResData', ),
+ '2.23.42.0.32': ('setct-CredReqTBS', ),
+ '2.23.42.0.33': ('setct-CredReqTBSX', ),
+ '2.23.42.0.34': ('setct-CredResData', ),
+ '2.23.42.0.35': ('setct-CredRevReqTBS', ),
+ '2.23.42.0.36': ('setct-CredRevReqTBSX', ),
+ '2.23.42.0.37': ('setct-CredRevResData', ),
+ '2.23.42.0.38': ('setct-PCertReqData', ),
+ '2.23.42.0.39': ('setct-PCertResTBS', ),
+ '2.23.42.0.40': ('setct-BatchAdminReqData', ),
+ '2.23.42.0.41': ('setct-BatchAdminResData', ),
+ '2.23.42.0.42': ('setct-CardCInitResTBS', ),
+ '2.23.42.0.43': ('setct-MeAqCInitResTBS', ),
+ '2.23.42.0.44': ('setct-RegFormResTBS', ),
+ '2.23.42.0.45': ('setct-CertReqData', ),
+ '2.23.42.0.46': ('setct-CertReqTBS', ),
+ '2.23.42.0.47': ('setct-CertResData', ),
+ '2.23.42.0.48': ('setct-CertInqReqTBS', ),
+ '2.23.42.0.49': ('setct-ErrorTBS', ),
+ '2.23.42.0.50': ('setct-PIDualSignedTBE', ),
+ '2.23.42.0.51': ('setct-PIUnsignedTBE', ),
+ '2.23.42.0.52': ('setct-AuthReqTBE', ),
+ '2.23.42.0.53': ('setct-AuthResTBE', ),
+ '2.23.42.0.54': ('setct-AuthResTBEX', ),
+ '2.23.42.0.55': ('setct-AuthTokenTBE', ),
+ '2.23.42.0.56': ('setct-CapTokenTBE', ),
+ '2.23.42.0.57': ('setct-CapTokenTBEX', ),
+ '2.23.42.0.58': ('setct-AcqCardCodeMsgTBE', ),
+ '2.23.42.0.59': ('setct-AuthRevReqTBE', ),
+ '2.23.42.0.60': ('setct-AuthRevResTBE', ),
+ '2.23.42.0.61': ('setct-AuthRevResTBEB', ),
+ '2.23.42.0.62': ('setct-CapReqTBE', ),
+ '2.23.42.0.63': ('setct-CapReqTBEX', ),
+ '2.23.42.0.64': ('setct-CapResTBE', ),
+ '2.23.42.0.65': ('setct-CapRevReqTBE', ),
+ '2.23.42.0.66': ('setct-CapRevReqTBEX', ),
+ '2.23.42.0.67': ('setct-CapRevResTBE', ),
+ '2.23.42.0.68': ('setct-CredReqTBE', ),
+ '2.23.42.0.69': ('setct-CredReqTBEX', ),
+ '2.23.42.0.70': ('setct-CredResTBE', ),
+ '2.23.42.0.71': ('setct-CredRevReqTBE', ),
+ '2.23.42.0.72': ('setct-CredRevReqTBEX', ),
+ '2.23.42.0.73': ('setct-CredRevResTBE', ),
+ '2.23.42.0.74': ('setct-BatchAdminReqTBE', ),
+ '2.23.42.0.75': ('setct-BatchAdminResTBE', ),
+ '2.23.42.0.76': ('setct-RegFormReqTBE', ),
+ '2.23.42.0.77': ('setct-CertReqTBE', ),
+ '2.23.42.0.78': ('setct-CertReqTBEX', ),
+ '2.23.42.0.79': ('setct-CertResTBE', ),
+ '2.23.42.0.80': ('setct-CRLNotificationTBS', ),
+ '2.23.42.0.81': ('setct-CRLNotificationResTBS', ),
+ '2.23.42.0.82': ('setct-BCIDistributionTBS', ),
+ '2.23.42.1': ('message extensions', 'set-msgExt'),
+ '2.23.42.1.1': ('generic cryptogram', 'setext-genCrypt'),
+ '2.23.42.1.3': ('merchant initiated auth', 'setext-miAuth'),
+ '2.23.42.1.4': ('setext-pinSecure', ),
+ '2.23.42.1.5': ('setext-pinAny', ),
+ '2.23.42.1.7': ('setext-track2', ),
+ '2.23.42.1.8': ('additional verification', 'setext-cv'),
+ '2.23.42.3': ('set-attr', ),
+ '2.23.42.3.0': ('setAttr-Cert', ),
+ '2.23.42.3.0.0': ('set-rootKeyThumb', ),
+ '2.23.42.3.0.1': ('set-addPolicy', ),
+ '2.23.42.3.1': ('payment gateway capabilities', 'setAttr-PGWYcap'),
+ '2.23.42.3.2': ('setAttr-TokenType', ),
+ '2.23.42.3.2.1': ('setAttr-Token-EMV', ),
+ '2.23.42.3.2.2': ('setAttr-Token-B0Prime', ),
+ '2.23.42.3.3': ('issuer capabilities', 'setAttr-IssCap'),
+ '2.23.42.3.3.3': ('setAttr-IssCap-CVM', ),
+ '2.23.42.3.3.3.1': ('generate cryptogram', 'setAttr-GenCryptgrm'),
+ '2.23.42.3.3.4': ('setAttr-IssCap-T2', ),
+ '2.23.42.3.3.4.1': ('encrypted track 2', 'setAttr-T2Enc'),
+ '2.23.42.3.3.4.2': ('cleartext track 2', 'setAttr-T2cleartxt'),
+ '2.23.42.3.3.5': ('setAttr-IssCap-Sig', ),
+ '2.23.42.3.3.5.1': ('ICC or token signature', 'setAttr-TokICCsig'),
+ '2.23.42.3.3.5.2': ('secure device signature', 'setAttr-SecDevSig'),
+ '2.23.42.5': ('set-policy', ),
+ '2.23.42.5.0': ('set-policy-root', ),
+ '2.23.42.7': ('certificate extensions', 'set-certExt'),
+ '2.23.42.7.0': ('setCext-hashedRoot', ),
+ '2.23.42.7.1': ('setCext-certType', ),
+ '2.23.42.7.2': ('setCext-merchData', ),
+ '2.23.42.7.3': ('setCext-cCertRequired', ),
+ '2.23.42.7.4': ('setCext-tunneling', ),
+ '2.23.42.7.5': ('setCext-setExt', ),
+ '2.23.42.7.6': ('setCext-setQualf', ),
+ '2.23.42.7.7': ('setCext-PGWYcapabilities', ),
+ '2.23.42.7.8': ('setCext-TokenIdentifier', ),
+ '2.23.42.7.9': ('setCext-Track2Data', ),
+ '2.23.42.7.10': ('setCext-TokenType', ),
+ '2.23.42.7.11': ('setCext-IssuerCapabilities', ),
+ '2.23.42.8': ('set-brand', ),
+ '2.23.42.8.1': ('set-brand-IATA-ATA', ),
+ '2.23.42.8.4': ('set-brand-Visa', ),
+ '2.23.42.8.5': ('set-brand-MasterCard', ),
+ '2.23.42.8.30': ('set-brand-Diners', ),
+ '2.23.42.8.34': ('set-brand-AmericanExpress', ),
+ '2.23.42.8.35': ('set-brand-JCB', ),
+ '2.23.42.8.6011': ('set-brand-Novus', ),
+ '2.23.43': ('wap', ),
+ '2.23.43.1': ('wap-wsg', ),
+ '2.23.43.1.4': ('wap-wsg-idm-ecid', ),
+ '2.23.43.1.4.1': ('wap-wsg-idm-ecid-wtls1', ),
+ '2.23.43.1.4.3': ('wap-wsg-idm-ecid-wtls3', ),
+ '2.23.43.1.4.4': ('wap-wsg-idm-ecid-wtls4', ),
+ '2.23.43.1.4.5': ('wap-wsg-idm-ecid-wtls5', ),
+ '2.23.43.1.4.6': ('wap-wsg-idm-ecid-wtls6', ),
+ '2.23.43.1.4.7': ('wap-wsg-idm-ecid-wtls7', ),
+ '2.23.43.1.4.8': ('wap-wsg-idm-ecid-wtls8', ),
+ '2.23.43.1.4.9': ('wap-wsg-idm-ecid-wtls9', ),
+ '2.23.43.1.4.10': ('wap-wsg-idm-ecid-wtls10', ),
+ '2.23.43.1.4.11': ('wap-wsg-idm-ecid-wtls11', ),
+ '2.23.43.1.4.12': ('wap-wsg-idm-ecid-wtls12', ),
+}
+# #####################################################################################
+# #####################################################################################
+
+_OID_LOOKUP = dict()
+_NORMALIZE_NAMES = dict()
+_NORMALIZE_NAMES_SHORT = dict()
+
+for dotted, names in _OID_MAP.items():
+ for name in names:
+ if name in _NORMALIZE_NAMES and _OID_LOOKUP[name] != dotted:
+ raise AssertionError(
+ 'Name collision during setup: "{0}" for OIDs {1} and {2}'
+ .format(name, dotted, _OID_LOOKUP[name])
+ )
+ _NORMALIZE_NAMES[name] = names[0]
+ _NORMALIZE_NAMES_SHORT[name] = names[-1]
+ _OID_LOOKUP[name] = dotted
+for alias, original in [('userID', 'userId')]:
+ if alias in _NORMALIZE_NAMES:
+ raise AssertionError(
+ 'Name collision during adding aliases: "{0}" (alias for "{1}") is already mapped to OID {2}'
+ .format(alias, original, _OID_LOOKUP[alias])
+ )
+ _NORMALIZE_NAMES[alias] = original
+ _NORMALIZE_NAMES_SHORT[alias] = _NORMALIZE_NAMES_SHORT[original]
+ _OID_LOOKUP[alias] = _OID_LOOKUP[original]
+
+
+def pyopenssl_normalize_name(name, short=False):
+ nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(name))
+ if nid != 0:
+ b_name = OpenSSL._util.lib.OBJ_nid2ln(nid)
+ name = to_text(OpenSSL._util.ffi.string(b_name))
+ if short:
+ return _NORMALIZE_NAMES_SHORT.get(name, name)
+ else:
+ return _NORMALIZE_NAMES.get(name, name)
+
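+# A quick sketch of the normalization above (assuming the linked OpenSSL
+# resolves 'CN' to the long name 'commonName'; values follow _OID_MAP):
+#
+#     pyopenssl_normalize_name('CN')              # -> 'commonName'
+#     pyopenssl_normalize_name('CN', short=True)  # -> 'CN'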
+
+# #####################################################################################
+# #####################################################################################
+# # This excerpt is dual licensed under the terms of the Apache License, Version
+# # 2.0, and the BSD License. See the LICENSE file at
+# # https://github.com/pyca/cryptography/blob/master/LICENSE for complete details.
+# #
+# # Adapted from cryptography's hazmat/backends/openssl/decode_asn1.py
+# #
+# # Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
+# # Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
+# #
+# # Relevant commits from cryptography project (https://github.com/pyca/cryptography):
+# # pyca/cryptography@719d536dd691e84e208534798f2eb4f82aaa2e07
+# # pyca/cryptography@5ab6d6a5c05572bd1c75f05baf264a2d0001894a
+# # pyca/cryptography@2e776e20eb60378e0af9b7439000d0e80da7c7e3
+# # pyca/cryptography@fb309ed24647d1be9e319b61b1f2aa8ebb87b90b
+# # pyca/cryptography@2917e460993c475c72d7146c50dc3bbc2414280d
+# # pyca/cryptography@3057f91ea9a05fb593825006d87a391286a4d828
+# # pyca/cryptography@d607dd7e5bc5c08854ec0c9baff70ba4a35be36f
+def _obj2txt(openssl_lib, openssl_ffi, obj):
+ # Set to 80 on the recommendation of
+ # https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values
+ #
+ # But OIDs longer than this occur in real life (e.g. Active
+ # Directory makes some very long OIDs). So we need to detect
+ # and properly handle the case where the default buffer is not
+ # big enough.
+ #
+ buf_len = 80
+ buf = openssl_ffi.new("char[]", buf_len)
+
+ # 'res' is the number of bytes that *would* be written if the
+ # buffer is large enough. If 'res' > buf_len - 1, we need to
+ # alloc a big-enough buffer and go again.
+ res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1)
+ if res > buf_len - 1: # account for terminating null byte
+ buf_len = res + 1
+ buf = openssl_ffi.new("char[]", buf_len)
+ res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1)
+ return openssl_ffi.buffer(buf, res)[:].decode()
+# #####################################################################################
+# #####################################################################################
+
+
+def cryptography_get_extensions_from_cert(cert):
+ # Since cryptography won't give us the DER value for an extension
+    # (that is only stored for unrecognized extensions), we have to redo
+    # the extension parsing ourselves.
+ result = dict()
+ backend = cert._backend
+ x509_obj = cert._x509
+
+ for i in range(backend._lib.X509_get_ext_count(x509_obj)):
+ ext = backend._lib.X509_get_ext(x509_obj, i)
+ if ext == backend._ffi.NULL:
+ continue
+ crit = backend._lib.X509_EXTENSION_get_critical(ext)
+ data = backend._lib.X509_EXTENSION_get_data(ext)
+ backend.openssl_assert(data != backend._ffi.NULL)
+ der = backend._ffi.buffer(data.data, data.length)[:]
+ entry = dict(
+ critical=(crit == 1),
+ value=base64.b64encode(der),
+ )
+ oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext))
+ result[oid] = entry
+ return result
+
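+# Sketch of the returned structure (hypothetical certificate with a critical
+# Basic Constraints extension; keys are dotted OIDs, values base64-encoded DER):
+#
+#     cryptography_get_extensions_from_cert(cert)
+#     # -> {'2.5.29.19': {'critical': True, 'value': b'MAMBAf8='}, ...}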
+
+def cryptography_get_extensions_from_csr(csr):
+ # Since cryptography won't give us the DER value for an extension
+    # (that is only stored for unrecognized extensions), we have to redo
+    # the extension parsing ourselves.
+ result = dict()
+ backend = csr._backend
+
+ extensions = backend._lib.X509_REQ_get_extensions(csr._x509_req)
+ extensions = backend._ffi.gc(
+ extensions,
+ lambda ext: backend._lib.sk_X509_EXTENSION_pop_free(
+ ext,
+ backend._ffi.addressof(backend._lib._original_lib, "X509_EXTENSION_free")
+ )
+ )
+
+ for i in range(backend._lib.sk_X509_EXTENSION_num(extensions)):
+ ext = backend._lib.sk_X509_EXTENSION_value(extensions, i)
+ if ext == backend._ffi.NULL:
+ continue
+ crit = backend._lib.X509_EXTENSION_get_critical(ext)
+ data = backend._lib.X509_EXTENSION_get_data(ext)
+ backend.openssl_assert(data != backend._ffi.NULL)
+ der = backend._ffi.buffer(data.data, data.length)[:]
+ entry = dict(
+ critical=(crit == 1),
+ value=base64.b64encode(der),
+ )
+ oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext))
+ result[oid] = entry
+ return result
+
+
+def pyopenssl_get_extensions_from_cert(cert):
+ # While pyOpenSSL allows us to get an extension's DER value, it won't
+ # give us the dotted string for an OID. So we have to do some magic to
+ # get hold of it.
+ result = dict()
+ ext_count = cert.get_extension_count()
+ for i in range(0, ext_count):
+ ext = cert.get_extension(i)
+ entry = dict(
+ critical=bool(ext.get_critical()),
+ value=base64.b64encode(ext.get_data()),
+ )
+ oid = _obj2txt(
+ OpenSSL._util.lib,
+ OpenSSL._util.ffi,
+ OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension)
+ )
+ # This could also be done a bit simpler:
+ #
+ # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid))
+ #
+ # Unfortunately this gives the wrong result in case the linked OpenSSL
+ # doesn't know the OID. That's why we have to get the OID dotted string
+ # similarly to how cryptography does it.
+ result[oid] = entry
+ return result
+
+
+def pyopenssl_get_extensions_from_csr(csr):
+ # While pyOpenSSL allows us to get an extension's DER value, it won't
+ # give us the dotted string for an OID. So we have to do some magic to
+ # get hold of it.
+ result = dict()
+ for ext in csr.get_extensions():
+ entry = dict(
+ critical=bool(ext.get_critical()),
+ value=base64.b64encode(ext.get_data()),
+ )
+ oid = _obj2txt(
+ OpenSSL._util.lib,
+ OpenSSL._util.ffi,
+ OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension)
+ )
+ # This could also be done a bit simpler:
+ #
+ # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid))
+ #
+ # Unfortunately this gives the wrong result in case the linked OpenSSL
+ # doesn't know the OID. That's why we have to get the OID dotted string
+ # similarly to how cryptography does it.
+ result[oid] = entry
+ return result
+
+
+def cryptography_name_to_oid(name):
+ dotted = _OID_LOOKUP.get(name)
+ if dotted is None:
+ raise OpenSSLObjectError('Cannot find OID for "{0}"'.format(name))
+ return x509.oid.ObjectIdentifier(dotted)
+
+
+def cryptography_oid_to_name(oid, short=False):
+ dotted_string = oid.dotted_string
+ names = _OID_MAP.get(dotted_string)
+ name = names[0] if names else oid._name
+ if short:
+ return _NORMALIZE_NAMES_SHORT.get(name, name)
+ else:
+ return _NORMALIZE_NAMES.get(name, name)
+
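+# The two helpers above are inverses of each other up to normalization, e.g.:
+#
+#     cryptography_name_to_oid('subjectAltName')  # -> ObjectIdentifier('2.5.29.17')
+#     cryptography_oid_to_name(x509.oid.ObjectIdentifier('2.5.29.17'))
+#     # -> 'X509v3 Subject Alternative Name'
+#     cryptography_oid_to_name(x509.oid.ObjectIdentifier('2.5.29.17'), short=True)
+#     # -> 'subjectAltName'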
+
+def cryptography_get_name(name):
+ '''
+    Given a name string, returns a cryptography x509 GeneralName object (for example x509.DNSName).
+ Raises an OpenSSLObjectError if the name is unknown or cannot be parsed.
+ '''
+ try:
+ if name.startswith('DNS:'):
+ return x509.DNSName(to_text(name[4:]))
+ if name.startswith('IP:'):
+ return x509.IPAddress(ipaddress.ip_address(to_text(name[3:])))
+ if name.startswith('email:'):
+ return x509.RFC822Name(to_text(name[6:]))
+ if name.startswith('URI:'):
+ return x509.UniformResourceIdentifier(to_text(name[4:]))
+ except Exception as e:
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}": {1}'.format(name, e))
+ if ':' not in name:
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (forgot "DNS:" prefix?)'.format(name))
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (potentially unsupported by cryptography backend)'.format(name))
+
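+# Expected behaviour, by example (hypothetical SAN strings):
+#
+#     cryptography_get_name('DNS:ansible.com')  # -> x509.DNSName(u'ansible.com')
+#     cryptography_get_name('IP:203.0.113.1')   # -> x509.IPAddress(IPv4Address(u'203.0.113.1'))
+#     cryptography_get_name('ansible.com')      # raises OpenSSLObjectError (missing 'DNS:' prefix)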
+
+def _get_hex(bytesstr):
+ if bytesstr is None:
+ return bytesstr
+ data = binascii.hexlify(bytesstr)
+ data = to_text(b':'.join(data[i:i + 2] for i in range(0, len(data), 2)))
+ return data
+
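+# For example:
+#
+#     _get_hex(b'\x04\xa3\xff')  # -> u'04:a3:ff'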
+
+def cryptography_decode_name(name):
+ '''
+    Given a cryptography x509 GeneralName object (such as x509.DNSName), returns a string.
+ Raises an OpenSSLObjectError if the name is not supported.
+ '''
+ if isinstance(name, x509.DNSName):
+ return 'DNS:{0}'.format(name.value)
+ if isinstance(name, x509.IPAddress):
+ return 'IP:{0}'.format(name.value.compressed)
+ if isinstance(name, x509.RFC822Name):
+ return 'email:{0}'.format(name.value)
+ if isinstance(name, x509.UniformResourceIdentifier):
+ return 'URI:{0}'.format(name.value)
+ if isinstance(name, x509.DirectoryName):
+ # FIXME: test
+ return 'DirName:' + ''.join(['/{0}:{1}'.format(attribute.oid._name, attribute.value) for attribute in name.value])
+ if isinstance(name, x509.RegisteredID):
+ # FIXME: test
+ return 'RegisteredID:{0}'.format(name.value)
+ if isinstance(name, x509.OtherName):
+ # FIXME: test
+ return '{0}:{1}'.format(name.type_id.dotted_string, _get_hex(name.value))
+ raise OpenSSLObjectError('Cannot decode name "{0}"'.format(name))
+
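+# This is the inverse of cryptography_get_name() for the supported types, e.g.:
+#
+#     cryptography_decode_name(x509.DNSName(u'ansible.com'))          # -> 'DNS:ansible.com'
+#     cryptography_decode_name(x509.RFC822Name(u'user@ansible.com'))  # -> 'email:user@ansible.com'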
+
+def _cryptography_get_keyusage(usage):
+ '''
+ Given a key usage identifier string, returns the parameter name used by cryptography's x509.KeyUsage().
+ Raises an OpenSSLObjectError if the identifier is unknown.
+ '''
+ if usage in ('Digital Signature', 'digitalSignature'):
+ return 'digital_signature'
+ if usage in ('Non Repudiation', 'nonRepudiation'):
+ return 'content_commitment'
+ if usage in ('Key Encipherment', 'keyEncipherment'):
+ return 'key_encipherment'
+ if usage in ('Data Encipherment', 'dataEncipherment'):
+ return 'data_encipherment'
+ if usage in ('Key Agreement', 'keyAgreement'):
+ return 'key_agreement'
+ if usage in ('Certificate Sign', 'keyCertSign'):
+ return 'key_cert_sign'
+ if usage in ('CRL Sign', 'cRLSign'):
+ return 'crl_sign'
+ if usage in ('Encipher Only', 'encipherOnly'):
+ return 'encipher_only'
+ if usage in ('Decipher Only', 'decipherOnly'):
+ return 'decipher_only'
+ raise OpenSSLObjectError('Unknown key usage "{0}"'.format(usage))
+
+
+def cryptography_parse_key_usage_params(usages):
+ '''
+ Given a list of key usage identifier strings, returns the parameters for cryptography's x509.KeyUsage().
+ Raises an OpenSSLObjectError if an identifier is unknown.
+ '''
+ params = dict(
+ digital_signature=False,
+ content_commitment=False,
+ key_encipherment=False,
+ data_encipherment=False,
+ key_agreement=False,
+ key_cert_sign=False,
+ crl_sign=False,
+ encipher_only=False,
+ decipher_only=False,
+ )
+ for usage in usages:
+ params[_cryptography_get_keyusage(usage)] = True
+ return params
+
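+# The resulting dict is meant to be splatted into cryptography's x509.KeyUsage(),
+# e.g. (hypothetical usage list):
+#
+#     params = cryptography_parse_key_usage_params(['digitalSignature', 'keyEncipherment'])
+#     key_usage = x509.KeyUsage(**params)  # only the two requested flags are True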
+
+def cryptography_get_basic_constraints(constraints):
+ '''
+ Given a list of constraints, returns a tuple (ca, path_length).
+ Raises an OpenSSLObjectError if a constraint is unknown or cannot be parsed.
+ '''
+ ca = False
+ path_length = None
+ if constraints:
+ for constraint in constraints:
+ if constraint.startswith('CA:'):
+ if constraint == 'CA:TRUE':
+ ca = True
+ elif constraint == 'CA:FALSE':
+ ca = False
+ else:
+ raise OpenSSLObjectError('Unknown basic constraint value "{0}" for CA'.format(constraint[3:]))
+ elif constraint.startswith('pathlen:'):
+ v = constraint[len('pathlen:'):]
+ try:
+ path_length = int(v)
+ except Exception as e:
+ raise OpenSSLObjectError('Cannot parse path length constraint "{0}" ({1})'.format(v, e))
+ else:
+ raise OpenSSLObjectError('Unknown basic constraint "{0}"'.format(constraint))
+ return ca, path_length
+
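+# For example (hypothetical constraint list):
+#
+#     cryptography_get_basic_constraints(['CA:TRUE', 'pathlen:1'])  # -> (True, 1)
+#     cryptography_get_basic_constraints(None)                      # -> (False, None)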
+
+def binary_exp_mod(f, e, m):
+ '''Computes f^e mod m in O(log e) multiplications modulo m.'''
+ # Compute len_e = floor(log_2(e))
+ len_e = -1
+ x = e
+ while x > 0:
+ x >>= 1
+ len_e += 1
+ # Compute f**e mod m
+ result = 1
+ for k in range(len_e, -1, -1):
+ result = (result * result) % m
+ if ((e >> k) & 1) != 0:
+ result = (result * f) % m
+ return result
+
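+# binary_exp_mod() is square-and-multiply exponentiation and should agree with
+# Python's built-in three-argument pow():
+#
+#     binary_exp_mod(5, 117, 19)   # -> 1 (== pow(5, 117, 19))
+#     binary_exp_mod(2, 10, 1000)  # -> 24 (2**10 == 1024)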
+
+def simple_gcd(a, b):
+ '''Compute GCD of its two inputs.'''
+ while b != 0:
+ a, b = b, a % b
+ return a
+
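+# Euclid's algorithm, e.g.:
+#
+#     simple_gcd(54, 24)  # -> 6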
+
+def quick_is_not_prime(n):
+ '''Does some quick checks to see if we can poke a hole into the primality of n.
+
+    A result of `False` does **not** mean that the number is prime; it just means
+    that this quick check could not prove it composite.
+ '''
+ if n <= 2:
+ return True
+ # The constant in the next line is the product of all primes < 200
+ if simple_gcd(n, 7799922041683461553249199106329813876687996789903550945093032474868511536164700810) > 1:
+ return True
+ # TODO: maybe do some iterations of Miller-Rabin to increase confidence
+ # (https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test)
+ return False
+
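+# For example, 221 == 13 * 17 shares a factor with the primorial constant
+# above, while the prime 211 passes the quick check unscathed:
+#
+#     quick_is_not_prime(221)  # -> True (a factor < 200 was found)
+#     quick_is_not_prime(211)  # -> False (no quick disproof; 211 happens to be prime)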
+
+python_version = (sys.version_info[0], sys.version_info[1])
+if python_version >= (2, 7):
+    # int.bit_length() is available on Python 2.7 and 3.1+, so any supported
+    # Python 3 takes this branch; Ansible still supports Python 2.6 on remote nodes
+ def count_bits(no):
+ no = abs(no)
+ if no == 0:
+ return 0
+ return no.bit_length()
+else:
+ # Slow, but works
+ def count_bits(no):
+ no = abs(no)
+ count = 0
+ while no > 0:
+ no >>= 1
+ count += 1
+ return count
+
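+# Both implementations agree with int.bit_length() semantics, e.g.:
+#
+#     count_bits(0)    # -> 0
+#     count_bits(255)  # -> 8
+#     count_bits(256)  # -> 9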
+
+PEM_START = '-----BEGIN '
+PEM_END = '-----'
+PKCS8_PRIVATEKEY_NAMES = ('PRIVATE KEY', 'ENCRYPTED PRIVATE KEY')
+PKCS1_PRIVATEKEY_SUFFIX = ' PRIVATE KEY'
+
+
+def identify_private_key_format(content):
+ '''Given the contents of a private key file, identifies its format.'''
+ # See https://github.com/openssl/openssl/blob/master/crypto/pem/pem_pkey.c#L40-L85
+ # (PEM_read_bio_PrivateKey)
+ # and https://github.com/openssl/openssl/blob/master/include/openssl/pem.h#L46-L47
+ # (PEM_STRING_PKCS8, PEM_STRING_PKCS8INF)
+ try:
+ lines = content.decode('utf-8').splitlines(False)
+ if lines[0].startswith(PEM_START) and lines[0].endswith(PEM_END) and len(lines[0]) > len(PEM_START) + len(PEM_END):
+ name = lines[0][len(PEM_START):-len(PEM_END)]
+ if name in PKCS8_PRIVATEKEY_NAMES:
+ return 'pkcs8'
+ if len(name) > len(PKCS1_PRIVATEKEY_SUFFIX) and name.endswith(PKCS1_PRIVATEKEY_SUFFIX):
+ return 'pkcs1'
+ return 'unknown-pem'
+ except UnicodeDecodeError:
+ pass
+ return 'raw'
+
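+# For example (hypothetical key file contents; only the first line matters):
+#
+#     identify_private_key_format(b'-----BEGIN RSA PRIVATE KEY-----\n...')  # -> 'pkcs1'
+#     identify_private_key_format(b'-----BEGIN PRIVATE KEY-----\n...')      # -> 'pkcs8'
+#     identify_private_key_format(b'\x30\x82\x01\x0a...')                   # -> 'raw' (e.g. DER)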
+
+def cryptography_key_needs_digest_for_signing(key):
+ '''Tests whether the given private key requires a digest algorithm for signing.
+
+ Ed25519 and Ed448 keys do not; they need None to be passed as the digest algorithm.
+ '''
+ if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
+ return False
+ if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
+ return False
+ return True
+
+
+def cryptography_compare_public_keys(key1, key2):
+ '''Tests whether two public keys are the same.
+
+ Needs special logic for Ed25519 and Ed448 keys, since they do not have public_numbers().
+ '''
+ if CRYPTOGRAPHY_HAS_ED25519:
+ a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
+ b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
+ if a or b:
+ if not a or not b:
+ return False
+ a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ return a == b
+ if CRYPTOGRAPHY_HAS_ED448:
+ a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
+ b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
+ if a or b:
+ if not a or not b:
+ return False
+ a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ return a == b
+ return key1.public_numbers() == key2.public_numbers()
+
+
+if HAS_CRYPTOGRAPHY:
+ REVOCATION_REASON_MAP = {
+ 'unspecified': x509.ReasonFlags.unspecified,
+ 'key_compromise': x509.ReasonFlags.key_compromise,
+ 'ca_compromise': x509.ReasonFlags.ca_compromise,
+ 'affiliation_changed': x509.ReasonFlags.affiliation_changed,
+ 'superseded': x509.ReasonFlags.superseded,
+ 'cessation_of_operation': x509.ReasonFlags.cessation_of_operation,
+ 'certificate_hold': x509.ReasonFlags.certificate_hold,
+ 'privilege_withdrawn': x509.ReasonFlags.privilege_withdrawn,
+ 'aa_compromise': x509.ReasonFlags.aa_compromise,
+ 'remove_from_crl': x509.ReasonFlags.remove_from_crl,
+ }
+ REVOCATION_REASON_MAP_INVERSE = dict()
+ for k, v in REVOCATION_REASON_MAP.items():
+ REVOCATION_REASON_MAP_INVERSE[v] = k
+
+
+def cryptography_decode_revoked_certificate(cert):
+ result = {
+ 'serial_number': cert.serial_number,
+ 'revocation_date': cert.revocation_date,
+ 'issuer': None,
+ 'issuer_critical': False,
+ 'reason': None,
+ 'reason_critical': False,
+ 'invalidity_date': None,
+ 'invalidity_date_critical': False,
+ }
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.CertificateIssuer)
+ result['issuer'] = list(ext.value)
+ result['issuer_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.CRLReason)
+ result['reason'] = ext.value.reason
+ result['reason_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.InvalidityDate)
+ result['invalidity_date'] = ext.value.invalidity_date
+ result['invalidity_date_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ return result
diff --git a/test/support/integration/plugins/module_utils/database.py b/test/support/integration/plugins/module_utils/database.py
new file mode 100644
index 0000000000..014939a260
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/database.py
@@ -0,0 +1,142 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class SQLParseError(Exception):
+ pass
+
+
+class UnclosedQuoteError(SQLParseError):
+ pass
+
+
+# maps a type of identifier to the maximum number of dot levels that are
+# allowed to specify that identifier. For example, a database column can be
+# specified by up to 4 levels: database.schema.table.column
+_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
+ database=1,
+ schema=2,
+ table=3,
+ column=4,
+ role=1,
+ tablespace=1,
+ sequence=3,
+ publication=1,
+)
+_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
+
+
+def _find_end_quote(identifier, quote_char):
+ accumulate = 0
+ while True:
+ try:
+ quote = identifier.index(quote_char)
+ except ValueError:
+ raise UnclosedQuoteError
+ accumulate = accumulate + quote
+ try:
+ next_char = identifier[quote + 1]
+ except IndexError:
+ return accumulate
+ if next_char == quote_char:
+ try:
+ identifier = identifier[quote + 2:]
+ accumulate = accumulate + 2
+ except IndexError:
+ raise UnclosedQuoteError
+ else:
+ return accumulate
+
+
+def _identifier_parse(identifier, quote_char):
+ if not identifier:
+ raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
+
+ already_quoted = False
+ if identifier.startswith(quote_char):
+ already_quoted = True
+ try:
+ end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
+ except UnclosedQuoteError:
+ already_quoted = False
+ else:
+ if end_quote < len(identifier) - 1:
+ if identifier[end_quote + 1] == '.':
+ dot = end_quote + 1
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ further_identifiers.insert(0, first_identifier)
+ else:
+ raise SQLParseError('User escaped identifiers must escape extra quotes')
+ else:
+ further_identifiers = [identifier]
+
+ if not already_quoted:
+ try:
+ dot = identifier.index('.')
+ except ValueError:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ if dot == 0 or dot >= len(identifier) - 1:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ first_identifier = first_identifier.replace(quote_char, quote_char * 2)
+ first_identifier = ''.join((quote_char, first_identifier, quote_char))
+ further_identifiers.insert(0, first_identifier)
+
+ return further_identifiers
+
+
+def pg_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='"')
+ if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+ return '.'.join(identifier_fragments)
+
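+# For example (hypothetical identifiers):
+#
+#     pg_quote_identifier('my schema.my table', 'table')  # -> '"my schema"."my table"'
+#     pg_quote_identifier('a.b.c.d.e', 'column')          # raises SQLParseError (more than 4 dot levels)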
+
+def mysql_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='`')
+ if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+
+ special_cased_fragments = []
+ for fragment in identifier_fragments:
+ if fragment == '`*`':
+ special_cased_fragments.append('*')
+ else:
+ special_cased_fragments.append(fragment)
+
+ return '.'.join(special_cased_fragments)
diff --git a/test/support/integration/plugins/module_utils/docker/common.py b/test/support/integration/plugins/module_utils/docker/common.py
new file mode 100644
index 0000000000..03307250d6
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/docker/common.py
@@ -0,0 +1,1022 @@
+#
+# Copyright 2016 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import platform
+import re
+import sys
+from datetime import timedelta
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+
+try:
+ from requests.exceptions import SSLError
+ from docker import __version__ as docker_version
+ from docker.errors import APIError, NotFound, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker import auth
+
+ if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+ HAS_DOCKER_PY_3 = True
+ from docker import APIClient as Client
+ elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ HAS_DOCKER_PY_2 = True
+ from docker import APIClient as Client
+ else:
+ from docker import Client
+
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_PY = False
+
+
+# The next two imports, ``docker.models`` and ``docker.ssladapter``, are used
+# to ensure the user does not have both the ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible
+try:
+ # docker (Docker SDK for Python >= 2.0.0)
+ import docker.models # noqa: F401
+ HAS_DOCKER_MODELS = True
+except ImportError:
+ HAS_DOCKER_MODELS = False
+
+try:
+ # docker-py (Docker SDK for Python < 2.0.0)
+ import docker.ssladapter # noqa: F401
+ HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+ HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+ from requests.exceptions import RequestException
+except ImportError:
+    # Either docker-py is no longer using requests, docker-py isn't installed
+    # at all, or docker-py's dependency requests is missing. In any case, define
+    # an exception class RequestException so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = 'localhost'
+MIN_DOCKER_VERSION = "1.8.0"
+DEFAULT_TIMEOUT_SECONDS = 60
+
+DOCKER_COMMON_ARGS = dict(
+ docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
+ tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
+ api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
+ ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
+ client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
+ client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
+ ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
+ tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
+ validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
+ debug=dict(type='bool', default=False)
+)
+
+DOCKER_MUTUALLY_EXCLUSIVE = []
+
+DOCKER_REQUIRED_TOGETHER = [
+ ['client_cert', 'client_key']
+]
+
+DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
+EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
+BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
+if not HAS_DOCKER_PY:
+ docker_version = None
+
+    # No Docker SDK for Python. Create a placeholder client to allow
+    # instantiation of AnsibleModule and proper error handling
+ class Client(object): # noqa: F811
+ def __init__(self, **kwargs):
+ pass
+
+ class APIError(Exception): # noqa: F811
+ pass
+
+ class NotFound(Exception): # noqa: F811
+ pass
+
+
+def is_image_name_id(name):
+ """Check whether the given image name is in fact an image ID (hash)."""
+ if re.match('^sha256:[0-9a-fA-F]{64}$', name):
+ return True
+ return False
+
+
+def is_valid_tag(tag, allow_empty=False):
+ """Check whether the given string is a valid docker tag name."""
+ if not tag:
+ return allow_empty
+ # See here ("Extended description") for a definition what tags can be:
+ # https://docs.docker.com/engine/reference/commandline/tag/
+ return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
+
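+# For example:
+#
+#     is_image_name_id('sha256:' + 64 * 'a')  # -> True
+#     is_valid_tag('v1.0.3')                  # -> True
+#     is_valid_tag('-latest')                 # -> False (first character must be alphanumeric or '_')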
+
+def sanitize_result(data):
+ """Sanitize data object for return to Ansible.
+
+ When the data object contains types such as docker.types.containers.HostConfig,
+ Ansible will fail when these are returned via exit_json or fail_json.
+ HostConfig is derived from dict, but its constructor requires additional
+ arguments. This function sanitizes data structures by recursively converting
+ everything derived from dict to dict and everything derived from list (and tuple)
+ to a list.
+ """
+ if isinstance(data, dict):
+ return dict((k, sanitize_result(v)) for k, v in data.items())
+ elif isinstance(data, (list, tuple)):
+ return [sanitize_result(v) for v in data]
+ else:
+ return data
+
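+# For example, tuples become lists and dict-derived objects become plain dicts:
+#
+#     sanitize_result({'Ports': (80, 443)})  # -> {'Ports': [80, 443]}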
+
+class DockerBaseClass(object):
+
+ def __init__(self):
+ self.debug = False
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+
+def update_tls_hostname(result):
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ if ':' in parsed_url.netloc:
+ result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+ else:
+            result['tls_hostname'] = parsed_url.netloc
+
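+# For example (hypothetical host URL):
+#
+#     result = {'docker_host': 'tcp://docker.example.com:2376', 'tls_hostname': None}
+#     update_tls_hostname(result)
+#     result['tls_hostname']  # -> 'docker.example.com'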
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ else:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = _get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+        # TLS with no certs and no host verification
+ tls_config = _get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+
+DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
+DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
+DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
+ "Hint: if you do not need Python 2.6 support, try "
+ "`pip uninstall docker-py` instead, followed by `pip install docker`.")
+
+
+class AnsibleDockerClient(Client):
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
+ min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if)
+
+ NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
+
+ self.docker_py_version = LooseVersion(docker_version)
+
+ if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
+ self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
+ "SDK for Python) installed together as they use the same namespace and cause a corrupt "
+ "installation. Please uninstall both packages, and re-install only the docker-py or docker "
+ "python module (for %s's Python %s). It is recommended to install the docker module if no "
+ "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
+ "can leave the other module in a broken state." % (platform.node(), sys.executable))
+
+ if not HAS_DOCKER_PY:
+ if NEEDS_DOCKER_PY2:
+ msg = missing_required_lib("Docker SDK for Python: docker")
+ msg = msg + ", for example via `pip install docker`. The error was: %s"
+ else:
+ msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
+ msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
+ self.fail(msg % HAS_DOCKER_ERROR)
+
+ if self.docker_py_version < LooseVersion(min_docker_version):
+ msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
+ if not NEEDS_DOCKER_PY2:
+ # The minimal required version is < 2.0 (and the current version as well).
+ # Advertise docker (instead of docker-py) for non-Python-2.6 users.
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif docker_version < LooseVersion('2.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClient, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.version()['ApiVersion']
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ if min_docker_api_version is not None:
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ def fail(self, msg, **kwargs):
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value):
+ if param_value is not None:
+ # take module parameter value
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return env_value
+
+ # take the default
+ return default_value
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+        # Precedence: module parameters -> environment variables -> defaults.
+
+ self.log('Getting credentials')
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = self.module.params.get(key)
+
+ if self.module.params.get('use_tls'):
+ # support use_tls option in docker_image.py. This will be deprecated.
+ use_tls = self.module.params.get('use_tls')
+ if use_tls == 'encrypt':
+ params['tls'] = True
+ if use_tls == 'verify':
+ params['validate_certs'] = True
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
+            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS),
+ )
+
+ update_tls_hostname(result)
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_py = True
+ support_docker_api = True
+ if 'docker_py_version' in data:
+ support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_py and support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ elif not support_docker_py:
+ msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
+ if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif self.docker_py_version < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.inspect_container(container=container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ for container in self.containers(all=True):
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ for network in self.networks():
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.inspect_network(network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+            # In API <= 1.20, images pulled from Docker Hub can show up under the name 'docker.io/<name>'
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+ # Last case: if docker.io wasn't there, it can be that
+ # the image wasn't found either (#15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ inspection = self.inspect_image(images[0]['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+ return inspection
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ inspection = self.inspect_image(image_id)
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ return inspection
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ response = self.images(name=name)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ images = response
+ if tag:
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def pull_image(self, name, tag="latest"):
+ '''
+ Pull an image
+ '''
+ self.log("Pulling image %s:%s" % (name, tag))
+ old_tag = self.find_image(name, tag)
+ try:
+ for line in self.pull(name, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+ if isinstance(result, Sequence):
+ for warning in result:
+ self.module.warn('Docker warning: {0}'.format(warning))
+ elif isinstance(result, string_types) and result:
+ self.module.warn('Docker warning: {0}'.format(result))
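+    # Example (illustrative): for result = {'Warnings': ['no swap limit support']},
+    # report_warnings(result) emits the module warning
+    # 'Docker warning: no swap limit support'; report_warnings(result, ['a', 'b'])
+    # would crawl result['a']['b'] instead.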
+
+ def inspect_distribution(self, image, **kwargs):
+ '''
+ Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
+ since prior versions did not support accessing private repositories.
+ '''
+ if self.docker_py_version < LooseVersion('4.0.0'):
+ registry = auth.resolve_repository_name(image)[0]
+ header = auth.get_config_header(self, registry)
+ if header:
+ return self._result(self._get(
+ self._url('/distribution/{0}/json', image),
+ headers={'X-Registry-Auth': header}
+ ), json=True)
+ return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
+
+
+def compare_dict_allow_more_present(av, bv):
+ '''
+ Compare two dictionaries for whether every entry of the first is in the second.
+ '''
+ for key, value in av.items():
+ if key not in bv:
+ return False
+ if bv[key] != value:
+ return False
+ return True
+
+
+def compare_generic(a, b, method, datatype):
+ '''
+ Compare values a and b as described by method and datatype.
+
+ Returns ``True`` if the values compare equal, and ``False`` if not.
+
+ ``a`` is usually the module's parameter, while ``b`` is a property
+ of the current object. ``a`` must not be ``None`` (except for
+ ``datatype == 'value'``).
+
+ Valid values for ``method`` are:
+ - ``ignore`` (always compare as equal);
+    - ``strict`` (only compare if really equal);
+ - ``allow_more_present`` (allow b to have elements which a does not have).
+
+ Valid values for ``datatype`` are:
+ - ``value``: for simple values (strings, numbers, ...);
+ - ``list``: for ``list``s or ``tuple``s where order matters;
+ - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
+ matter;
+    - ``set(dict)``: for ``list``s, ``tuple``s or ``set``s where order does
+ not matter and which contain ``dict``s; ``allow_more_present`` is used
+ for the ``dict``s, and these are assumed to be dictionaries of values;
+ - ``dict``: for dictionaries of values.
+ '''
+ if method == 'ignore':
+ return True
+ # If a or b is None:
+ if a is None or b is None:
+ # If both are None: equality
+ if a == b:
+ return True
+ # Otherwise, not equal for values, and equal
+ # if the other is empty for set/list/dict
+ if datatype == 'value':
+ return False
+ # For allow_more_present, allow a to be None
+ if method == 'allow_more_present' and a is None:
+ return True
+ # Otherwise, the iterable object which is not None must have length 0
+ return len(b if a is None else a) == 0
+ # Do proper comparison (both objects not None)
+ if datatype == 'value':
+ return a == b
+ elif datatype == 'list':
+ if method == 'strict':
+ return a == b
+ else:
+ i = 0
+ for v in a:
+ while i < len(b) and b[i] != v:
+ i += 1
+ if i == len(b):
+ return False
+ i += 1
+ return True
+ elif datatype == 'dict':
+ if method == 'strict':
+ return a == b
+ else:
+ return compare_dict_allow_more_present(a, b)
+ elif datatype == 'set':
+ set_a = set(a)
+ set_b = set(b)
+ if method == 'strict':
+ return set_a == set_b
+ else:
+ return set_b >= set_a
+ elif datatype == 'set(dict)':
+ for av in a:
+ found = False
+ for bv in b:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ if method == 'strict':
+ # If we would know that both a and b do not contain duplicates,
+ # we could simply compare len(a) to len(b) to finish this test.
+ # We can assume that b has no duplicates (as it is returned by
+ # docker), but we don't know for a.
+ for bv in b:
+ found = False
+ for av in a:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ return True
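+# Usage sketch (illustrative) of how ``method`` and ``datatype`` interact:
+#
+#   compare_generic([1, 2], [1, 3, 2], 'allow_more_present', 'list')  # True
+#   compare_generic([1, 2], [2, 1], 'strict', 'set')                  # True
+#   compare_generic([{'a': 1}], [{'a': 1, 'b': 2}], 'strict', 'set(dict)')  # True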
+
+
+class DifferenceTracker(object):
+ def __init__(self):
+ self._diff = []
+
+ def add(self, name, parameter=None, active=None):
+ self._diff.append(dict(
+ name=name,
+ parameter=parameter,
+ active=active,
+ ))
+
+ def merge(self, other_tracker):
+ self._diff.extend(other_tracker._diff)
+
+ @property
+ def empty(self):
+ return len(self._diff) == 0
+
+ def get_before_after(self):
+ '''
+ Return texts ``before`` and ``after``.
+ '''
+ before = dict()
+ after = dict()
+ for item in self._diff:
+ before[item['name']] = item['active']
+ after[item['name']] = item['parameter']
+ return before, after
+
+ def has_difference_for(self, name):
+ '''
+        Return whether a difference exists for the given name.
+ '''
+ return any(diff for diff in self._diff if diff['name'] == name)
+
+ def get_legacy_docker_container_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = []
+ for entry in self._diff:
+ item = dict()
+ item[entry['name']] = dict(
+ parameter=entry['parameter'],
+ container=entry['active'],
+ )
+ result.append(item)
+ return result
+
+ def get_legacy_docker_diffs(self):
+ '''
+        Return the list of difference names in the docker_container legacy format.
+ '''
+ result = [entry['name'] for entry in self._diff]
+ return result
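+    # Example (illustrative):
+    #   tracker = DifferenceTracker()
+    #   tracker.add('memory', parameter='512m', active='256m')
+    #   tracker.get_before_after()            # ({'memory': '256m'}, {'memory': '512m'})
+    #   tracker.has_difference_for('memory')  # True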
+
+
+def clean_dict_booleans_for_docker_api(data):
+ '''
+ Go doesn't like Python booleans 'True' or 'False', while Ansible is just
+ fine with them in YAML. As such, they need to be converted in cases where
+ we pass dictionaries to the Docker API (e.g. docker_network's
+ driver_options and docker_prune's filters).
+ '''
+ result = dict()
+ if data is not None:
+ for k, v in data.items():
+ if v is True:
+ v = 'true'
+ elif v is False:
+ v = 'false'
+ else:
+ v = str(v)
+ result[str(k)] = v
+ return result
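+# Example (illustrative): clean_dict_booleans_for_docker_api(
+#     {'attachable': True, 'mtu': 1500}) returns {'attachable': 'true', 'mtu': '1500'}.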
+
+
+def convert_duration_to_nanosecond(time_str):
+ """
+ Return time duration in nanosecond.
+ """
+ if not isinstance(time_str, str):
+ raise ValueError('Missing unit in duration - %s' % time_str)
+
+ regex = re.compile(
+ r'^(((?P<hours>\d+)h)?'
+ r'((?P<minutes>\d+)m(?!s))?'
+ r'((?P<seconds>\d+)s)?'
+ r'((?P<milliseconds>\d+)ms)?'
+ r'((?P<microseconds>\d+)us)?)$'
+ )
+ parts = regex.match(time_str)
+
+ if not parts:
+ raise ValueError('Invalid time duration - %s' % time_str)
+
+ parts = parts.groupdict()
+ time_params = {}
+ for (name, value) in parts.items():
+ if value:
+ time_params[name] = int(value)
+
+ delta = timedelta(**time_params)
+ time_in_nanoseconds = (
+ delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
+ ) * 10 ** 3
+
+ return time_in_nanoseconds
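+# Example (illustrative): convert_duration_to_nanosecond('1m30s') returns
+# 90000000000, i.e. 90 seconds expressed in nanoseconds.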
+
+
+def parse_healthcheck(healthcheck):
+ """
+ Return dictionary of healthcheck parameters and boolean if
+ healthcheck defined in image was requested to be disabled.
+ """
+ if (not healthcheck) or (not healthcheck.get('test')):
+ return None, None
+
+ result = dict()
+
+ # All supported healthcheck parameters
+ options = dict(
+ test='test',
+ interval='interval',
+ timeout='timeout',
+ start_period='start_period',
+ retries='retries'
+ )
+
+ duration_options = ['interval', 'timeout', 'start_period']
+
+ for (key, value) in options.items():
+ if value in healthcheck:
+ if healthcheck.get(value) is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value in duration_options:
+ time = convert_duration_to_nanosecond(healthcheck.get(value))
+ if time:
+ result[key] = time
+ elif healthcheck.get(value):
+ result[key] = healthcheck.get(value)
+ if key == 'test':
+ if isinstance(result[key], (tuple, list)):
+ result[key] = [str(e) for e in result[key]]
+ else:
+ result[key] = ['CMD-SHELL', str(result[key])]
+ elif key == 'retries':
+ try:
+ result[key] = int(result[key])
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse number of retries for healthcheck. '
+ 'Expected an integer, got "{0}".'.format(result[key])
+ )
+
+ if result['test'] == ['NONE']:
+ # If the user explicitly disables the healthcheck, return None
+ # as the healthcheck object, and set disable_healthcheck to True
+ return None, True
+
+ return result, False
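+# Example (illustrative): parse_healthcheck(
+#     {'test': 'curl -f http://localhost/', 'interval': '30s', 'retries': 3})
+# returns ({'test': ['CMD-SHELL', 'curl -f http://localhost/'],
+#           'interval': 30000000000, 'retries': 3}, False), while
+# parse_healthcheck({'test': ['NONE']}) returns (None, True) to signal that the
+# healthcheck defined in the image should be disabled.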
+
+
+def omit_none_from_dict(d):
+ """
+ Return a copy of the dictionary with all keys with value None omitted.
+ """
+ return dict((k, v) for (k, v) in d.items() if v is not None)
diff --git a/test/support/integration/plugins/module_utils/docker/swarm.py b/test/support/integration/plugins/module_utils/docker/swarm.py
new file mode 100644
index 0000000000..55d94db06b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/docker/swarm.py
@@ -0,0 +1,280 @@
+# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
+# (c) Thierry Bouvet (@tbouvet)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+from time import sleep
+
+try:
+ from docker.errors import APIError, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.docker.common import (
+ AnsibleDockerClient,
+ LooseVersion,
+)
+
+
+class AnsibleDockerSwarmClient(AnsibleDockerClient):
+
+ def __init__(self, **kwargs):
+ super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
+
+ def get_swarm_node_id(self):
+ """
+        Get the 'NodeID' of the Swarm node, or 'None' if the host is not in a Swarm. It returns the NodeID
+        of the Docker host the module is executed on
+ :return:
+ NodeID of host or 'None' if not part of Swarm
+ """
+
+ try:
+ info = self.info()
+ except APIError as exc:
+ self.fail("Failed to get node information for %s" % to_native(exc))
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return swarm_info['Swarm']['NodeID']
+ return None
+
+ def check_if_swarm_node(self, node_id=None):
+ """
+        Checks if the host is part of a Docker Swarm. If 'node_id' is not provided it reads the Docker host
+        system information and looks for the relevant key in the output. If 'node_id' is provided it tries to
+        read the node information, assuming it is run on a Swarm manager. The get_node_inspect() method handles
+        the exception if it is not executed on a Swarm manager
+
+ :param node_id: Node identifier
+ :return:
+ bool: True if node is part of Swarm, False otherwise
+ """
+
+ if node_id is None:
+ try:
+ info = self.info()
+ except APIError:
+ self.fail("Failed to get host information.")
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return True
+ if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+ return True
+ return False
+ else:
+ try:
+ node_info = self.get_node_inspect(node_id=node_id)
+ except APIError:
+ return
+
+ if node_info['ID'] is not None:
+ return True
+ return False
+
+ def check_if_swarm_manager(self):
+ """
+        Checks if the node role is set as Manager in the Swarm. The node is the Docker host on which the
+        module action is performed. The inspect_swarm() call will fail if the node is not a manager
+
+ :return: True if node is Swarm Manager, False otherwise
+ """
+
+ try:
+ self.inspect_swarm()
+ return True
+ except APIError:
+ return False
+
+ def fail_task_if_not_swarm_manager(self):
+ """
+        If the host is not a swarm manager then the Ansible task on this host should end with the 'failed' state
+ """
+ if not self.check_if_swarm_manager():
+ self.fail("Error running docker swarm module: must run on swarm manager node")
+
+ def check_if_swarm_worker(self):
+ """
+        Checks if the node role is set as Worker in the Swarm. The node is the Docker host on which the
+        module action is performed. Fails via check_if_swarm_node() if run on a host that is not part of a Swarm
+
+ :return: True if node is Swarm Worker, False otherwise
+ """
+
+ if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+ return True
+ return False
+
+ def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+ """
+        Checks if the node status on the Swarm manager is 'down'. If node_id is provided it queries the
+        manager about the specified node, otherwise it queries the manager about itself. If run on a Swarm
+        Worker node or a host that is not part of a Swarm it will fail the playbook
+
+        :param repeat_check: number of check attempts with a 5 second delay between them; by default checks only once
+        :param node_id: node ID or name; if None the method will try to get the node_id of the host the module runs on
+ :return:
+ True if node is part of swarm but its state is down, False otherwise
+ """
+
+ if repeat_check < 1:
+ repeat_check = 1
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ for retry in range(0, repeat_check):
+ if retry > 0:
+ sleep(5)
+ node_info = self.get_node_inspect(node_id=node_id)
+ if node_info['Status']['State'] == 'down':
+ return True
+ return False
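+    # Example (illustrative): check_if_swarm_node_is_down(node_id='node-1',
+    # repeat_check=3) polls the manager up to three times with a 5 second sleep
+    # between attempts, returning True as soon as the node reports state 'down'.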
+
+ def get_node_inspect(self, node_id=None, skip_missing=False):
+ """
+        Returns Swarm node info about a single node, as in the 'docker node inspect' command
+
+        :param skip_missing: if True the function will return None instead of failing the task
+        :param node_id: node ID or name; if None the method will try to get the node_id of the host the module runs on
+ :return:
+ Single node information structure
+ """
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ if node_id is None:
+ self.fail("Failed to get node information.")
+
+ try:
+ node_info = self.inspect_node(node_id=node_id)
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ if exc.status_code == 404:
+ if skip_missing:
+ return None
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+
+ if 'ManagerStatus' in node_info:
+ if node_info['ManagerStatus'].get('Leader'):
+ # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ count_colons = node_info['ManagerStatus']['Addr'].count(":")
+ if count_colons == 1:
+ swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+ else:
+ swarm_leader_ip = node_info['Status']['Addr']
+ node_info['Status']['Addr'] = swarm_leader_ip
+ return node_info
+
+ def get_all_nodes_inspect(self):
+ """
+        Returns Swarm node info about all registered nodes, as in the 'docker node inspect' command
+
+ :return:
+ Structure with information about all nodes
+ """
+ try:
+ node_info = self.nodes()
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+ return node_info
+
+ def get_all_nodes_list(self, output='short'):
+ """
+ Returns list of nodes registered in Swarm
+
+        :param output: Defines the format of the returned data
+        :return:
+            If 'output' is 'short' the returned data is a list of the hostnames of the nodes registered
+            in the Swarm; if 'output' is 'long' the returned data is a list of dicts containing the
+            attributes as in the output of the 'docker node ls' command
+ """
+ nodes_list = []
+
+ nodes_inspect = self.get_all_nodes_inspect()
+ if nodes_inspect is None:
+ return None
+
+ if output == 'short':
+ for node in nodes_inspect:
+ nodes_list.append(node['Description']['Hostname'])
+ elif output == 'long':
+ for node in nodes_inspect:
+ node_property = {}
+
+ node_property.update({'ID': node['ID']})
+ node_property.update({'Hostname': node['Description']['Hostname']})
+ node_property.update({'Status': node['Status']['State']})
+ node_property.update({'Availability': node['Spec']['Availability']})
+ if 'ManagerStatus' in node:
+ if node['ManagerStatus']['Leader'] is True:
+ node_property.update({'Leader': True})
+ node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
+ node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
+
+ nodes_list.append(node_property)
+ else:
+ return None
+
+ return nodes_list
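+    # Example (illustrative): get_all_nodes_list() might return
+    # ['node-1', 'node-2'], while get_all_nodes_list(output='long') returns
+    # entries such as {'ID': 'abc123', 'Hostname': 'node-1', 'Status': 'ready',
+    # 'Availability': 'active', 'EngineVersion': '19.03.5'}.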
+
+ def get_node_name_by_id(self, nodeid):
+ return self.get_node_inspect(nodeid)['Description']['Hostname']
+
+ def get_unlock_key(self):
+ if self.docker_py_version < LooseVersion('2.7.0'):
+ return None
+ return super(AnsibleDockerSwarmClient, self).get_unlock_key()
+
+ def get_service_inspect(self, service_id, skip_missing=False):
+ """
+        Returns Swarm service info about a single service, as in the 'docker service inspect' command
+
+ :param service_id: service ID or name
+ :param skip_missing: if True then function will return None instead of failing the task
+ :return:
+ Single service information structure
+ """
+ try:
+ service_info = self.inspect_service(service_id)
+ except NotFound as exc:
+ if skip_missing is False:
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ else:
+ return None
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
+ self.fail("Error inspecting swarm service: %s" % exc)
+ except Exception as exc:
+ self.fail("Error inspecting swarm service: %s" % exc)
+
+ json_str = json.dumps(service_info, ensure_ascii=False)
+ service_info = json.loads(json_str)
+ return service_info
diff --git a/test/support/integration/plugins/module_utils/mysql.py b/test/support/integration/plugins/module_utils/mysql.py
new file mode 100644
index 0000000000..46198f367b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/mysql.py
@@ -0,0 +1,106 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015
+# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+try:
+ import pymysql as mysql_driver
+ _mysql_cursor_param = 'cursor'
+except ImportError:
+ try:
+ import MySQLdb as mysql_driver
+ import MySQLdb.cursors
+ _mysql_cursor_param = 'cursorclass'
+ except ImportError:
+ mysql_driver = None
+
+mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.'
+
+
+def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
+ connect_timeout=30, autocommit=False):
+ config = {}
+
+ if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
+ config['ssl'] = {}
+
+ if module.params['login_unix_socket']:
+ config['unix_socket'] = module.params['login_unix_socket']
+ else:
+ config['host'] = module.params['login_host']
+ config['port'] = module.params['login_port']
+
+ if os.path.exists(config_file):
+ config['read_default_file'] = config_file
+
+ # If login_user or login_password are given, they should override the
+ # config file
+ if login_user is not None:
+ config['user'] = login_user
+ if login_password is not None:
+ config['passwd'] = login_password
+ if ssl_cert is not None:
+ config['ssl']['cert'] = ssl_cert
+ if ssl_key is not None:
+ config['ssl']['key'] = ssl_key
+ if ssl_ca is not None:
+ config['ssl']['ca'] = ssl_ca
+ if db is not None:
+ config['db'] = db
+ if connect_timeout is not None:
+ config['connect_timeout'] = connect_timeout
+
+ if _mysql_cursor_param == 'cursor':
+ # In case of PyMySQL driver:
+ db_connection = mysql_driver.connect(autocommit=autocommit, **config)
+ else:
+ # In case of MySQLdb driver
+ db_connection = mysql_driver.connect(**config)
+ if autocommit:
+ db_connection.autocommit(True)
+
+ if cursor_class == 'DictCursor':
+ return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection
+ else:
+ return db_connection.cursor(), db_connection
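+# Usage sketch (illustrative): inside a module whose argument_spec includes
+# mysql_common_argument_spec(), a typical call is
+#   cursor, db_conn = mysql_connect(module, login_user='root',
+#                                   login_password='secret',
+#                                   cursor_class='DictCursor')
+# which returns a dictionary cursor and the underlying connection.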
+
+
+def mysql_common_argument_spec():
+ return dict(
+ login_user=dict(type='str', default=None),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=3306),
+ login_unix_socket=dict(type='str'),
+ config_file=dict(type='path', default='~/.my.cnf'),
+ connect_timeout=dict(type='int', default=30),
+ client_cert=dict(type='path', aliases=['ssl_cert']),
+ client_key=dict(type='path', aliases=['ssl_key']),
+ ca_cert=dict(type='path', aliases=['ssl_ca']),
+ )
diff --git a/test/support/integration/plugins/module_utils/postgres.py b/test/support/integration/plugins/module_utils/postgres.py
new file mode 100644
index 0000000000..63811c3055
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/postgres.py
@@ -0,0 +1,330 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
+# Most of this was originally added by other creators in the postgresql_user module.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+psycopg2 = None  # This line is needed for unit tests
+try:
+ import psycopg2
+ HAS_PSYCOPG2 = True
+except ImportError:
+ HAS_PSYCOPG2 = False
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from distutils.version import LooseVersion
+
+
+def postgres_common_argument_spec():
+ """
+ Return a dictionary with connection options.
+
+    The options are commonly used by most of the PostgreSQL modules.
+ """
+ return dict(
+ login_user=dict(default='postgres'),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(default=''),
+ login_unix_socket=dict(default=''),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+ )
+
+
+def ensure_required_libs(module):
+ """Check required libraries."""
+ if not HAS_PSYCOPG2:
+ module.fail_json(msg=missing_required_lib('psycopg2'))
+
+ if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
+ module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+
+def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
+ """Connect to a PostgreSQL database.
+
+ Return psycopg2 connection object.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ conn_params (dict) -- dictionary with connection parameters
+
+ Kwargs:
+ autocommit (bool) -- commit automatically (default False)
+ fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
+ """
+ ensure_required_libs(module)
+
+ db_connection = None
+ try:
+ db_connection = psycopg2.connect(**conn_params)
+ if autocommit:
+ if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+ # Switch role, if specified:
+ if module.params.get('session_role'):
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ try:
+ cursor.execute('SET ROLE "%s"' % module.params['session_role'])
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e))
+ finally:
+ cursor.close()
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least '
+ 'version 8.4 to support sslrootcert')
+
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ except Exception as e:
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ return db_connection
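+# Usage sketch (illustrative): a typical PostgreSQL module combines these helpers:
+#   conn_params = get_conn_params(module, module.params)
+#   db_connection = connect_to_db(module, conn_params, autocommit=True)
+#   cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)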
+
+
+def exec_sql(obj, query, query_params=None, ddl=False, add_to_executed=True, dont_exec=False):
+ """Execute SQL.
+
+ Auxiliary function for PostgreSQL user classes.
+
+    Returns a query result if possible, or True/False if the ddl=True arg was passed.
+    It is necessary for statements that don't return any result (like DDL queries).
+
+ Args:
+ obj (obj) -- must be an object of a user class.
+ The object must have module (AnsibleModule class object) and
+ cursor (psycopg cursor object) attributes
+ query (str) -- SQL query to execute
+
+ Kwargs:
+ query_params (dict or tuple) -- Query parameters to prevent SQL injections,
+ could be a dict or tuple
+ ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
+ (default False)
+ add_to_executed (bool) -- append the query to obj.executed_queries attribute
+ dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
+ to obj.executed_queries list and return True (default False)
+ """
+
+ if dont_exec:
+ # This is usually needed to return queries in check_mode
+ # without execution
+ query = obj.cursor.mogrify(query, query_params)
+ if add_to_executed:
+ obj.executed_queries.append(query)
+
+ return True
+
+ try:
+ if query_params is not None:
+ obj.cursor.execute(query, query_params)
+ else:
+ obj.cursor.execute(query)
+
+ if add_to_executed:
+ if query_params is not None:
+ obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
+ else:
+ obj.executed_queries.append(query)
+
+ if not ddl:
+ res = obj.cursor.fetchall()
+ return res
+ return True
+ except Exception as e:
+ obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ return False
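+# Usage sketch (illustrative): given an object with ``module``, ``cursor`` and
+# ``executed_queries`` attributes,
+#   exec_sql(obj, 'SELECT rolname FROM pg_roles WHERE rolname = %(name)s',
+#            query_params={'name': 'admin'}, add_to_executed=False)
+# returns the fetched rows, while passing ddl=True makes it return True/False.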
+
+
+def get_conn_params(module, params_dict, warn_db_default=True):
+ """Get connection parameters from the passed dictionary.
+
+ Return a dictionary with parameters to connect to PostgreSQL server.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ params_dict (dict) -- dictionary with variables
+
+ Kwargs:
+ warn_db_default (bool) -- warn that the default DB is used (default True)
+ """
+    # To use default values, keyword arguments must be absent, so
+    # check which values are empty and do not include them in the return dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ # Might be different in the modules:
+ if params_dict.get('db'):
+ params_map['db'] = 'database'
+ elif params_dict.get('database'):
+ params_map['database'] = 'database'
+ elif params_dict.get('login_db'):
+ params_map['login_db'] = 'database'
+ else:
+ if warn_db_default:
+            module.warn('Database name has not been passed, '
+                        'using the default database to connect to.')
+
+ kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
+ if is_localhost and params_dict["login_unix_socket"] != "":
+ kw["host"] = params_dict["login_unix_socket"]
+
+ return kw
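+# Example (illustrative): for params_dict = {'login_user': 'postgres',
+# 'login_password': 'secret', 'login_host': 'db1', 'port': 5432,
+# 'ssl_mode': 'prefer', 'login_unix_socket': ''}, get_conn_params(module,
+# params_dict) returns {'user': 'postgres', 'password': 'secret', 'host': 'db1',
+# 'port': 5432, 'sslmode': 'prefer'} and warns that the default database is used.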
+
+
+class PgMembership(object):
+ def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
+ self.module = module
+ self.cursor = cursor
+ self.target_roles = [r.strip() for r in target_roles]
+ self.groups = [r.strip() for r in groups]
+ self.executed_queries = []
+ self.granted = {}
+ self.revoked = {}
+ self.fail_on_role = fail_on_role
+ self.non_existent_roles = []
+ self.changed = False
+ self.__check_roles_exist()
+
+ def grant(self):
+ for group in self.groups:
+ self.granted[group] = []
+
+ for role in self.target_roles:
+ # If role is in a group now, pass:
+ if self.__check_membership(group, role):
+ continue
+
+ query = 'GRANT "%s" TO "%s"' % (group, role)
+ self.changed = exec_sql(self, query, ddl=True)
+
+ if self.changed:
+ self.granted[group].append(role)
+
+ return self.changed
+
+ def revoke(self):
+ for group in self.groups:
+ self.revoked[group] = []
+
+ for role in self.target_roles:
+ # If role is not in a group now, pass:
+ if not self.__check_membership(group, role):
+ continue
+
+ query = 'REVOKE "%s" FROM "%s"' % (group, role)
+ self.changed = exec_sql(self, query, ddl=True)
+
+ if self.changed:
+ self.revoked[group].append(role)
+
+ return self.changed
+
+ def __check_membership(self, src_role, dst_role):
+ query = ("SELECT ARRAY(SELECT b.rolname FROM "
+ "pg_catalog.pg_auth_members m "
+ "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(dst_role)s")
+
+ res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
+ membership = []
+ if res:
+ membership = res[0][0]
+
+ if not membership:
+ return False
+
+ if src_role in membership:
+ return True
+
+ return False
+
+ def __check_roles_exist(self):
+ existent_groups = self.__roles_exist(self.groups)
+ existent_roles = self.__roles_exist(self.target_roles)
+
+ for group in self.groups:
+ if group not in existent_groups:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % group)
+ else:
+ self.module.warn("Role %s does not exist, pass" % group)
+ self.non_existent_roles.append(group)
+
+ for role in self.target_roles:
+ if role not in existent_roles:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % role)
+ else:
+ self.module.warn("Role %s does not exist, pass" % role)
+
+ if role not in self.groups:
+ self.non_existent_roles.append(role)
+
+ else:
+ if self.fail_on_role:
+                    self.module.exit_json(msg="Role '%s' is a member of role '%s'" % (role, role))
+ else:
+                    self.module.warn("Role '%s' is a member of role '%s', pass" % (role, role))
+
+ # Update role lists, excluding non existent roles:
+ self.groups = [g for g in self.groups if g not in self.non_existent_roles]
+
+ self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]
+
+ def __roles_exist(self, roles):
+ tmp = ["'" + x + "'" for x in roles]
+ query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
+ return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
diff --git a/test/support/integration/plugins/module_utils/rabbitmq.py b/test/support/integration/plugins/module_utils/rabbitmq.py
new file mode 100644
index 0000000000..cf76400644
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/rabbitmq.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Jorge Rodriguez <jorge.rodriguez@tiriel.eu>
+# Copyright: (c) 2018, John Imison <john+github@imison.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six.moves.urllib import parse as urllib_parse
+from mimetypes import MimeTypes
+
+import os
+import json
+import traceback
+
+PIKA_IMP_ERR = None
+try:
+ import pika
+ import pika.exceptions
+ from pika import spec
+ HAS_PIKA = True
+except ImportError:
+ PIKA_IMP_ERR = traceback.format_exc()
+ HAS_PIKA = False
+
+
+def rabbitmq_argument_spec():
+ return dict(
+ login_user=dict(type='str', default='guest'),
+ login_password=dict(type='str', default='guest', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='str', default='15672'),
+ login_protocol=dict(type='str', default='http', choices=['http', 'https']),
+ ca_cert=dict(type='path', aliases=['cacert']),
+ client_cert=dict(type='path', aliases=['cert']),
+ client_key=dict(type='path', aliases=['key']),
+ vhost=dict(type='str', default='/'),
+ )
+
+
+# notification/rabbitmq_basic_publish.py
+class RabbitClient():
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.check_required_library()
+ self.check_host_params()
+ self.url = self.params['url']
+ self.proto = self.params['proto']
+ self.username = self.params['username']
+ self.password = self.params['password']
+ self.host = self.params['host']
+ self.port = self.params['port']
+ self.vhost = self.params['vhost']
+ self.queue = self.params['queue']
+ self.headers = self.params['headers']
+ self.cafile = self.params['cafile']
+ self.certfile = self.params['certfile']
+ self.keyfile = self.params['keyfile']
+
+ if self.host is not None:
+ self.build_url()
+
+ if self.cafile is not None:
+ self.append_ssl_certs()
+
+ self.connect_to_rabbitmq()
+
+ def check_required_library(self):
+ if not HAS_PIKA:
+ self.module.fail_json(msg=missing_required_lib("pika"), exception=PIKA_IMP_ERR)
+
+ def check_host_params(self):
+ # Fail if url is specified and other conflicting parameters have been specified
+ if self.params['url'] is not None and any(self.params[k] is not None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
+ self.module.fail_json(msg="url and proto, host, port, vhost, username or password cannot be specified at the same time.")
+
+ # Fail if url not specified and there is a missing parameter to build the url
+ if self.params['url'] is None and any(self.params[k] is None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
+ self.module.fail_json(msg="Connection parameters must be passed via url, or, proto, host, port, vhost, username or password.")
+
+ def append_ssl_certs(self):
+ ssl_options = {}
+ if self.cafile:
+ ssl_options['cafile'] = self.cafile
+ if self.certfile:
+ ssl_options['certfile'] = self.certfile
+ if self.keyfile:
+ ssl_options['keyfile'] = self.keyfile
+
+ self.url = self.url + '?ssl_options=' + urllib_parse.quote(json.dumps(ssl_options))
+
+ @staticmethod
+ def rabbitmq_argument_spec():
+ return dict(
+ url=dict(type='str'),
+ proto=dict(type='str', choices=['amqp', 'amqps']),
+ host=dict(type='str'),
+ port=dict(type='int'),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ vhost=dict(type='str'),
+ queue=dict(type='str')
+ )
+
+    # Consider some file size limits here
+ def _read_file(self, path):
+ try:
+ with open(path, "rb") as file_handle:
+ return file_handle.read()
+ except IOError as e:
+ self.module.fail_json(msg="Unable to open file %s: %s" % (path, to_native(e)))
+
+ @staticmethod
+ def _check_file_mime_type(path):
+ mime = MimeTypes()
+ return mime.guess_type(path)
+
+ def build_url(self):
+ self.url = '{0}://{1}:{2}@{3}:{4}/{5}'.format(self.proto,
+ self.username,
+ self.password,
+ self.host,
+ self.port,
+ self.vhost)
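+    # Example (illustrative): with proto='amqp', username='guest',
+    # password='guest', host='localhost', port=5672 and vhost='%2F',
+    # build_url() produces 'amqp://guest:guest@localhost:5672/%2F'.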
+
+ def connect_to_rabbitmq(self):
+ """
+ Function to connect to rabbitmq using username and password
+ """
+ try:
+ parameters = pika.URLParameters(self.url)
+ except Exception as e:
+ self.module.fail_json(msg="URL malformed: %s" % to_native(e))
+
+ try:
+ self.connection = pika.BlockingConnection(parameters)
+ except Exception as e:
+ self.module.fail_json(msg="Connection issue: %s" % to_native(e))
+
+ try:
+ self.conn_channel = self.connection.channel()
+ except pika.exceptions.AMQPChannelError as e:
+ self.close_connection()
+ self.module.fail_json(msg="Channel issue: %s" % to_native(e))
+
+ def close_connection(self):
+ try:
+ self.connection.close()
+ except pika.exceptions.AMQPConnectionError:
+ pass
+
+ def basic_publish(self):
+ self.content_type = self.params.get("content_type")
+
+ if self.params.get("body") is not None:
+ args = dict(
+ body=self.params.get("body"),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type, delivery_mode=1, headers=self.headers))
+
+ # If src (file) is defined and content_type is left as default, do a mime lookup on the file
+ if self.params.get("src") is not None and self.content_type == 'text/plain':
+ self.content_type = RabbitClient._check_file_mime_type(self.params.get("src"))[0]
+ self.headers.update(
+ filename=os.path.basename(self.params.get("src"))
+ )
+
+ args = dict(
+ body=self._read_file(self.params.get("src")),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type,
+ delivery_mode=1,
+ headers=self.headers
+ ))
+ elif self.params.get("src") is not None:
+ args = dict(
+ body=self._read_file(self.params.get("src")),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type,
+ delivery_mode=1,
+ headers=self.headers
+ ))
+
+ try:
+ # If queue is not defined, RabbitMQ will return the queue name of the automatically generated queue.
+ if self.queue is None:
+ result = self.conn_channel.queue_declare(durable=self.params.get("durable"),
+ exclusive=self.params.get("exclusive"),
+ auto_delete=self.params.get("auto_delete"))
+ self.conn_channel.confirm_delivery()
+ self.queue = result.method.queue
+ else:
+ self.conn_channel.queue_declare(queue=self.queue,
+ durable=self.params.get("durable"),
+ exclusive=self.params.get("exclusive"),
+ auto_delete=self.params.get("auto_delete"))
+ self.conn_channel.confirm_delivery()
+ except Exception as e:
+ self.module.fail_json(msg="Queue declare issue: %s" % to_native(e))
+
+ # https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/cloudstack.py#L150
+ if args['routing_key'] is None:
+ args['routing_key'] = self.queue
+
+ if args['exchange'] is None:
+ args['exchange'] = ''
+
+ try:
+ self.conn_channel.basic_publish(**args)
+ return True
+ except pika.exceptions.UnroutableError:
+ return False
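+# Usage sketch (illustrative): a publishing module typically does
+#   client = RabbitClient(module)
+#   success = client.basic_publish()
+#   client.close_connection()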
diff --git a/test/support/integration/plugins/modules/assemble.py b/test/support/integration/plugins/modules/assemble.py
new file mode 100644
index 0000000000..57ece419f9
--- /dev/null
+++ b/test/support/integration/plugins/modules/assemble.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: assemble
+short_description: Assemble configuration files from fragments
+description:
+- Assembles a configuration file from fragments.
+- Often a particular program takes a single configuration file and does not support a
+ C(conf.d) style structure where it is easy to build up the configuration
+ from multiple sources. C(assemble) will take a directory of files that can be
+ local or have already been transferred to the system, and concatenate them
+ together to produce a destination file.
+- Files are assembled in string sorting order.
+- Puppet calls this idea I(fragments).
+version_added: '0.5'
+options:
+ src:
+ description:
+ - An already existing directory full of source files.
+ type: path
+ required: true
+ dest:
+ description:
+ - A file to create using the concatenation of all of the source files.
+ type: path
+ required: true
+ backup:
+ description:
+ - Create a backup file (if C(yes)), including the timestamp information so
+ you can get the original file back if you somehow clobbered it
+ incorrectly.
+ type: bool
+ default: no
+ delimiter:
+ description:
+ - A delimiter to separate the file contents.
+ type: str
+ version_added: '1.4'
+ remote_src:
+ description:
+    - If C(no), it will search for src on the originating/master machine.
+ - If C(yes), it will go to the remote/target machine for the src.
+ type: bool
+ default: no
+ version_added: '1.4'
+ regexp:
+ description:
+    - Assemble files only if C(regexp) matches the filename.
+ - If not set, all files are assembled.
+    - Every "\" (backslash) must be escaped as "\\" to comply with YAML syntax.
+ - Uses L(Python regular expressions,http://docs.python.org/2/library/re.html).
+ type: str
+ ignore_hidden:
+ description:
+ - A boolean that controls if files that start with a '.' will be included or not.
+ type: bool
+ default: no
+ version_added: '2.0'
+ validate:
+ description:
+ - The validation command to run before copying into place.
+ - The path to the file to validate is passed in via '%s' which must be present as in the sshd example below.
+ - The command is passed securely so shell features like expansion and pipes won't work.
+ type: str
+ version_added: '2.0'
+seealso:
+- module: copy
+- module: template
+- module: win_copy
+author:
+- Stephen Fromm (@sfromm)
+extends_documentation_fragment:
+- decrypt
+- files
+'''
+
+EXAMPLES = r'''
+- name: Assemble from fragments from a directory
+ assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+
+- name: Insert the provided delimiter in between each fragment
+ assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+ delimiter: '### START FRAGMENT ###'
+
+- name: Assemble a new "sshd_config" file into place, after passing validation with sshd
+ assemble:
+ src: /etc/ssh/conf.d/
+ dest: /etc/ssh/sshd_config
+ validate: /usr/sbin/sshd -t -f %s
+'''
+
+import codecs
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import b, indexbytes
+from ansible.module_utils._text import to_native
+
+
+def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None):
+ ''' assemble a file from a directory of fragments '''
+ tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir)
+ tmp = os.fdopen(tmpfd, 'wb')
+ delimit_me = False
+ add_newline = False
+
+ for f in sorted(os.listdir(src_path)):
+ if compiled_regexp and not compiled_regexp.search(f):
+ continue
+ fragment = os.path.join(src_path, f)
+ if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
+ continue
+ with open(fragment, 'rb') as fragment_fh:
+ fragment_content = fragment_fh.read()
+
+ # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline:
+ tmp.write(b('\n'))
+
+ # delimiters should only appear between fragments
+ if delimit_me:
+ if delimiter:
+ # un-escape anything like newlines
+ delimiter = codecs.escape_decode(delimiter)[0]
+ tmp.write(delimiter)
+ # always make sure there's a newline after the
+ # delimiter, so lines don't run together
+
+ # byte indexing differs on Python 2 and 3,
+ # use indexbytes for compat
+ # chr(10) == '\n'
+ if indexbytes(delimiter, -1) != 10:
+ tmp.write(b('\n'))
+
+ tmp.write(fragment_content)
+ delimit_me = True
+ if fragment_content.endswith(b('\n')):
+ add_newline = False
+ else:
+ add_newline = True
+
+ tmp.close()
+ return temp_path
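+# Example (illustrative): assemble_from_fragments('/etc/someapp/fragments',
+# delimiter='### START FRAGMENT ###', tmpdir=module.tmpdir) concatenates the
+# fragment files in sorted order into a temporary file, writing the delimiter
+# (followed by a newline when needed) between fragments, and returns the temp
+# file path.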
+
+
+def cleanup(path, result=None):
+ # cleanup just in case
+ if os.path.exists(path):
+ try:
+ os.remove(path)
+ except (IOError, OSError) as e:
+ # don't error on possible race conditions, but keep warning
+ if result is not None:
+ result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))]
+
+
+def main():
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path', required=True),
+ delimiter=dict(type='str'),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ remote_src=dict(type='bool', default=False),
+ regexp=dict(type='str'),
+ ignore_hidden=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ ),
+ add_file_common_args=True,
+ )
+
+ changed = False
+ path_hash = None
+ dest_hash = None
+ src = module.params['src']
+ dest = module.params['dest']
+ backup = module.params['backup']
+ delimiter = module.params['delimiter']
+ regexp = module.params['regexp']
+ compiled_regexp = None
+ ignore_hidden = module.params['ignore_hidden']
+ validate = module.params.get('validate', None)
+
+ result = dict(src=src, dest=dest)
+ if not os.path.exists(src):
+ module.fail_json(msg="Source (%s) does not exist" % src)
+
+ if not os.path.isdir(src):
+ module.fail_json(msg="Source (%s) is not a directory" % src)
+
+ if regexp is not None:
+ try:
+ compiled_regexp = re.compile(regexp)
+ except re.error as e:
+ module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (to_native(e), regexp))
+
+ if validate and "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % validate)
+
+ path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden, module.tmpdir)
+ path_hash = module.sha1(path)
+ result['checksum'] = path_hash
+
+ # Backwards compat. This won't return data if FIPS mode is active
+ try:
+ pathmd5 = module.md5(path)
+ except ValueError:
+ pathmd5 = None
+ result['md5sum'] = pathmd5
+
+ if os.path.exists(dest):
+ dest_hash = module.sha1(dest)
+
+ if path_hash != dest_hash:
+ if validate:
+ (rc, out, err) = module.run_command(validate % path)
+ result['validation'] = dict(rc=rc, stdout=out, stderr=err)
+ if rc != 0:
+ cleanup(path)
+ module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
+ if backup and dest_hash is not None:
+ result['backup_file'] = module.backup_local(dest)
+
+ module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
+ changed = True
+
+ cleanup(path, result)
+
+ # handle file permissions
+ file_args = module.load_file_common_arguments(module.params)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Mission complete
+ result['msg'] = "OK"
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/cloud_init_data_facts.py b/test/support/integration/plugins/modules/cloud_init_data_facts.py
new file mode 100644
index 0000000000..4f871b99c5
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloud_init_data_facts.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloud_init_data_facts
+short_description: Retrieve facts of cloud-init.
+description:
+ - Gathers facts by reading the status.json and result.json of cloud-init.
+version_added: 2.6
+author: René Moser (@resmo)
+options:
+ filter:
+ description:
+ - Filter facts
+ choices: [ status, result ]
+notes:
+ - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
+'''
+
+EXAMPLES = '''
+- name: Gather all facts of cloud init
+ cloud_init_data_facts:
+ register: result
+
+- debug:
+ var: result
+
+- name: Wait for cloud init to finish
+ cloud_init_data_facts:
+ filter: status
+ register: res
+ until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
+ retries: 50
+ delay: 5
+'''
+
+RETURN = '''
+---
+cloud_init_data_facts:
+ description: Facts of result and status.
+ returned: success
+ type: dict
+ sample: '{
+ "status": {
+ "v1": {
+ "datasource": "DataSourceCloudStack",
+ "errors": []
+        }
+    },
+ "result": {
+ "v1": {
+ "datasource": "DataSourceCloudStack",
+ "init": {
+ "errors": [],
+ "finished": 1522066377.0185432,
+ "start": 1522066375.2648022
+ },
+ "init-local": {
+ "errors": [],
+ "finished": 1522066373.70919,
+ "start": 1522066373.4726632
+ },
+ "modules-config": {
+ "errors": [],
+ "finished": 1522066380.9097016,
+ "start": 1522066379.0011985
+ },
+ "modules-final": {
+ "errors": [],
+ "finished": 1522066383.56594,
+ "start": 1522066382.3449218
+ },
+ "stage": null
+ }
+    }
+  }'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+CLOUD_INIT_PATH = "/var/lib/cloud/data/"
+
+
+def gather_cloud_init_data_facts(module):
+ res = {
+ 'cloud_init_data_facts': dict()
+ }
+
+ for i in ['result', 'status']:
+ filter = module.params.get('filter')
+ if filter is None or filter == i:
+ res['cloud_init_data_facts'][i] = dict()
+ json_file = CLOUD_INIT_PATH + i + '.json'
+
+ if os.path.exists(json_file):
+ f = open(json_file, 'rb')
+ contents = to_text(f.read(), errors='surrogate_or_strict')
+ f.close()
+
+ if contents:
+ res['cloud_init_data_facts'][i] = module.from_json(contents)
+ return res
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filter=dict(choices=['result', 'status']),
+ ),
+ supports_check_mode=True,
+ )
+
+ facts = gather_cloud_init_data_facts(module)
+ result = dict(changed=False, ansible_facts=facts, **facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/consul_session.py b/test/support/integration/plugins/modules/consul_session.py
new file mode 100644
index 0000000000..6802ebe64e
--- /dev/null
+++ b/test/support/integration/plugins/modules/consul_session.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+module: consul_session
+short_description: Manipulate consul sessions
+description:
+ - Allows the addition, modification and deletion of sessions in a consul
+ cluster. These sessions can then be used in conjunction with key value pairs
+ to implement distributed locks. In depth documentation for working with
+ sessions can be found at http://www.consul.io/docs/internals/sessions.html
+requirements:
+ - python-consul
+ - requests
+version_added: "2.0"
+author:
+- Steve Gargan (@sgargan)
+options:
+ id:
+ description:
+ - ID of the session, required when I(state) is either C(info) or
+ C(remove).
+ type: str
+ state:
+ description:
+      - Whether the session should be present, i.e. created if it doesn't
+        exist, or absent, i.e. removed if present. If created, the I(id) for the
+ session is returned in the output. If C(absent), I(id) is
+ required to remove the session. Info for a single session, all the
+ sessions for a node or all available sessions can be retrieved by
+ specifying C(info), C(node) or C(list) for the I(state); for C(node)
+ or C(info), the node I(name) or session I(id) is required as parameter.
+ choices: [ absent, info, list, node, present ]
+ type: str
+ default: present
+ name:
+ description:
+ - The name that should be associated with the session. Required when
+ I(state=node) is used.
+ type: str
+ delay:
+ description:
+ - The optional lock delay that can be attached to the session when it
+        is created. Locks for invalidated sessions are blocked from being
+ acquired until this delay has expired. Durations are in seconds.
+ type: int
+ default: 15
+ node:
+ description:
+      - The name of the node with which the session will be associated.
+        By default this is the name of the agent.
+ type: str
+ datacenter:
+ description:
+ - The name of the datacenter in which the session exists or should be
+ created.
+ type: str
+ checks:
+ description:
+ - Checks that will be used to verify the session health. If
+ all the checks fail, the session will be invalidated and any locks
+        associated with the session will be released and can be acquired once
+ the associated lock delay has expired.
+ type: list
+ host:
+ description:
+      - The host of the consul agent. Defaults to C(localhost).
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ version_added: "2.1"
+ validate_certs:
+ description:
+ - Whether to verify the TLS certificate of the consul agent.
+ type: bool
+ default: True
+ version_added: "2.1"
+ behavior:
+ description:
+ - The optional behavior that can be attached to the session when it
+ is created. This controls the behavior when a session is invalidated.
+ choices: [ delete, release ]
+ type: str
+ default: release
+ version_added: "2.2"
+"""
+
+EXAMPLES = '''
+- name: register basic session with consul
+ consul_session:
+ name: session1
+
+- name: register a session with an existing check
+ consul_session:
+ name: session_with_check
+ checks:
+ - existing_check_name
+
+- name: register a session with lock_delay
+ consul_session:
+ name: session_with_delay
+    delay: 20
+
+- name: retrieve info about session by id
+ consul_session:
+ id: session_id
+ state: info
+
+- name: retrieve active sessions
+ consul_session:
+ state: list
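+
+# A hedged sketch: register a session, then remove it by its returned id
+# ('session' is an illustrative variable name):
+- name: register a session to remove later
+  consul_session:
+    name: temporary_session
+  register: session
+
+- name: remove the session by id
+  consul_session:
+    id: '{{ session.session_id }}'
+    state: absent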
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def execute(module):
+
+ state = module.params.get('state')
+
+ if state in ['info', 'list', 'node']:
+ lookup_sessions(module)
+ elif state == 'present':
+ update_session(module)
+ else:
+ remove_session(module)
+
+
+def lookup_sessions(module):
+
+ datacenter = module.params.get('datacenter')
+
+ state = module.params.get('state')
+ consul_client = get_consul_api(module)
+ try:
+ if state == 'list':
+ sessions_list = consul_client.session.list(dc=datacenter)
+ # Ditch the index, this can be grabbed from the results
+ if sessions_list and len(sessions_list) >= 2:
+ sessions_list = sessions_list[1]
+ module.exit_json(changed=True,
+ sessions=sessions_list)
+ elif state == 'node':
+ node = module.params.get('node')
+ sessions = consul_client.session.node(node, dc=datacenter)
+ module.exit_json(changed=True,
+ node=node,
+ sessions=sessions)
+ elif state == 'info':
+ session_id = module.params.get('id')
+
+ session_by_id = consul_client.session.info(session_id, dc=datacenter)
+ module.exit_json(changed=True,
+ session_id=session_id,
+ sessions=session_by_id)
+
+ except Exception as e:
+ module.fail_json(msg="Could not retrieve session info %s" % e)
+
+
+def update_session(module):
+
+ name = module.params.get('name')
+ delay = module.params.get('delay')
+ checks = module.params.get('checks')
+ datacenter = module.params.get('datacenter')
+ node = module.params.get('node')
+ behavior = module.params.get('behavior')
+
+ consul_client = get_consul_api(module)
+
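+    # Note: the client only exposes session creation here, so state=present
+    # always registers a fresh session; repeated runs are not idempotent.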
+ try:
+ session = consul_client.session.create(
+ name=name,
+ behavior=behavior,
+ node=node,
+ lock_delay=delay,
+ dc=datacenter,
+ checks=checks
+ )
+ module.exit_json(changed=True,
+ session_id=session,
+ name=name,
+ behavior=behavior,
+ delay=delay,
+ checks=checks,
+ node=node)
+ except Exception as e:
+ module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def remove_session(module):
+ session_id = module.params.get('id')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ consul_client.session.destroy(session_id)
+
+ module.exit_json(changed=True,
+ session_id=session_id)
+ except Exception as e:
+ module.fail_json(msg="Could not remove session with id '%s' %s" % (
+ session_id, e))
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ argument_spec = dict(
+ checks=dict(type='list'),
+        delay=dict(type='int', default=15),
+ behavior=dict(type='str', default='release', choices=['release', 'delete']),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8500),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ node=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
+ datacenter=dict(type='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'node', ['name']),
+ ('state', 'info', ['id']),
+            ('state', 'absent', ['id']),
+ ],
+ supports_check_mode=False
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/deploy_helper.py b/test/support/integration/plugins/modules/deploy_helper.py
new file mode 100644
index 0000000000..38594dde36
--- /dev/null
+++ b/test/support/integration/plugins/modules/deploy_helper.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+version_added: "2.0"
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects.
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+ - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the path parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+options:
+ path:
+ required: True
+ aliases: ['dest']
+ description:
+ - the root path of the project. Alias I(dest).
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ description:
+ - the state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+        C(absent) will remove the project folder (synonymous with the M(file) module with C(state=absent))
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+
+ release:
+ description:
+      - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (e.g. '20141119223359').
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ description:
+ - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+ default: releases
+
+ shared_path:
+ description:
+ - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+ default: shared
+
+ current_path:
+ description:
+ - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+ default: current
+
+ unfinished_filename:
+ description:
+ - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
+ contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
+ automatically deleted from the I(new_release_path) during C(state=finalize).
+ default: DEPLOY_UNFINISHED
+
+ clean:
+ description:
+ - Whether to run the clean procedure in case of C(state=finalize).
+ type: bool
+ default: 'yes'
+
+ keep_releases:
+ description:
+ - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+ default: 5
+
+notes:
+ - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+# root:
+# releases:
+# - 20140415234508
+# - 20140415235146
+# - 20140416082818
+#
+# shared:
+# - sessions
+# - uploads
+#
+# current: releases/20140416082818
+
+
+# The 'releases' folder holds all the available releases. A release is a complete build of the application being
+# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+# git tags or commit hashes.
+#
+# During a deploy, a new folder should be created in the releases folder and any build steps required should be
+# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+# with a link to this build.
+#
+# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+#
+# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+# release is reduced to the time it takes to switch the link.
+#
+# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
+# procedure to remove it during cleanup.
+
+
+# Typical usage
+- name: Initialize the deploy root and gather facts
+ deploy_helper:
+ path: /path/to/root
+- name: Clone the project to the new release folder
+ git:
+ repo: git://foosball.example.org/path/to/repo.git
+ dest: '{{ deploy_helper.new_release_path }}'
+ version: v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ file:
+ path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+ state: touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer:
+ command: install
+ working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+ file:
+ path: '{{ deploy_helper.shared_path }}/{{ item }}'
+ state: directory
+ with_items:
+ - sessions
+ - uploads
+- name: Add symlinks from the new release to the shared folder
+ file:
+ path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state: link
+ with_items:
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ deploy_helper:
+ path: /path/to/root
+ state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: present
+
+# All paths can be absolute or relative (to the 'path' parameter)
+- deploy_helper:
+ path: /path/to/root
+ releases_path: /var/www/project/releases
+ shared_path: /var/www/shared
+ current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- deploy_helper:
+ path: /path/to/root
+ release: v1.1.1
+ state: present
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Using a different unfinished_filename:
+- deploy_helper:
+ path: /path/to/root
+ unfinished_filename: README.md
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Postponing the cleanup of older builds:
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ clean: False
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+# Or running the cleanup ahead of the new deploy
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+- deploy_helper:
+ path: /path/to/root
+ state: present
+
+# Keeping more old releases:
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+ keep_releases: 10
+
+# Removing the entire project root folder
+- deploy_helper:
+ path: /path/to/root
+ state: absent
+
+# Debugging the facts returned by the module
+- deploy_helper:
+ path: /path/to/root
+- debug:
+ var: deploy_helper
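+
+# A check-mode sketch: with supports_check_mode the module reports what would
+# change without touching the filesystem (task shape is illustrative):
+- deploy_helper:
+    path: /path/to/root
+    state: clean
+  check_mode: yes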
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
+
+ if self.release:
+ new_release_path = os.path.join(releases_path, self.release)
+ else:
+ new_release_path = None
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception as e:
+ self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ changed = False
+
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+        if os.path.exists(tmp_link_name):
+            changed = True
+            if not self.module.check_mode:
+                os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
+ if not self.module.check_mode:
+ releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
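+                # Newest releases sort first; everything past the first
+                # keep_releases entries is deleted (the reserved version was
+                # already dropped from the list above).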
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(aliases=['dest'], required=True, type='path'),
+ release=dict(required=False, type='str', default=None),
+ releases_path=dict(required=False, type='str', default='releases'),
+ shared_path=dict(required=False, type='path', default='shared'),
+ current_path=dict(required=False, type='path', default='current'),
+ keep_releases=dict(required=False, type='int', default=5),
+ clean=dict(required=False, type='bool', default=True),
+ unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
+ state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'finalize':
+ if not deploy_helper.release:
+ module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = {'deploy_helper': []}
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+    result['changed'] = changes > 0
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/docker_swarm.py b/test/support/integration/plugins/modules/docker_swarm.py
new file mode 100644
index 0000000000..4fd4c875c4
--- /dev/null
+++ b/test/support/integration/plugins/modules/docker_swarm.py
@@ -0,0 +1,681 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+version_added: "2.7"
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ version_added: "2.8"
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ version_added: "2.8"
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+      - Use with state C(absent) to leave the swarm even if this node is a manager.
+ type: bool
+ default: no
+ state:
+ description:
+ - Set to C(present), to create/update a new cluster.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+      - Set to C(inspect) to display swarm information.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ - inspect
+ node_id:
+ description:
+ - Swarm id of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ type: str
+ remote_addrs:
+ description:
+ - Remote address of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+ - Maximum number of tasks history stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+      - Number of log entries between snapshots.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+      - Number of ticks (in seconds) between each heartbeat.
+ - Docker default value is C(1s).
+ type: int
+ election_tick:
+ description:
+      - Number of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10s).
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is C(5s).
+ type: int
+ node_cert_expiry:
+ description:
+ - Automatic expiry for nodes certificates.
+ - Docker default value is C(3months).
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+ Use M(docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(no).
+ - M(docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: no
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: no
+extends_documentation_fragment:
+ - docker
+ - docker.docker_py_1_documentation
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ docker_swarm:
+ state: remove
+ node_id: mynode
+
+- name: Inspect swarm
+ docker_swarm:
+ state: inspect
+ register: swarm_info
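+
+# A hedged sketch based on the autolock_managers option documented above;
+# the unlock key is returned in swarm_facts when autolocking is enabled:
+- name: Init a swarm with autolocked managers
+  docker_swarm:
+    state: present
+    autolock_managers: yes
+  register: swarm_info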
+'''
+
+RETURN = '''
+swarm_facts:
+  description: Information about the swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+ description: Token to create a new *worker* node
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+ description: Token to create a new *manager* node
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions done on the swarm.
+ returned: when action failed.
+ type: list
+ elements: str
+ example: "['This cluster is already a swarm cluster']"
+
+'''
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils.docker.common import (
+ DockerBaseClass,
+ DifferenceTracker,
+ RequestException,
+)
+
+from ansible.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils._text import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
+ def update_from_swarm_info(self, swarm_info):
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+ def update_parameters(self, client):
+ assign = dict(
+ snapshot_interval='snapshot_interval',
+ task_history_retention_limit='task_history_retention_limit',
+ keep_old_snapshots='keep_old_snapshots',
+ log_entries_for_slow_followers='log_entries_for_slow_followers',
+ heartbeat_tick='heartbeat_tick',
+ election_tick='election_tick',
+ dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+ node_cert_expiry='node_cert_expiry',
+ name='name',
+ labels='labels',
+ signing_ca_cert='signing_ca_cert',
+ signing_ca_key='signing_ca_key',
+ ca_force_rotate='ca_force_rotate',
+ autolock_managers='autolock_managers',
+ log_driver='log_driver',
+ )
+ params = dict()
+ for dest, source in assign.items():
+ if not client.option_minimal_versions[source]['supported']:
+ continue
+ value = getattr(self, source)
+ if value is not None:
+ params[dest] = value
+ self.spec = client.create_swarm_spec(**params)
+
+ def compare_to_active(self, other, client, differences):
+ for k in self.__dict__:
+ if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+ 'rotate_worker_token', 'rotate_manager_token', 'spec',
+ 'default_addr_pool', 'subnet_size'):
+ continue
+ if not client.option_minimal_versions[k]['supported']:
+ continue
+ value = getattr(self, k)
+ if value is None:
+ continue
+ other_value = getattr(other, k)
+ if value != other_value:
+ differences.add(k, parameter=value, active=other_value)
+ if self.rotate_worker_token:
+ differences.add('rotate_worker_token', parameter=True, active=False)
+ if self.rotate_manager_token:
+ differences.add('rotate_manager_token', parameter=True, active=False)
+ return differences
+
+
+class SwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+ self.swarm_info = {}
+
+ self.state = client.module.params['state']
+ self.force = client.module.params['force']
+ self.node_id = client.module.params['node_id']
+
+ self.differences = DifferenceTracker()
+ self.parameters = TaskParameters.from_ansible_params(client)
+
+ self.created = False
+
+ def __call__(self):
+ choice_map = {
+ "present": self.init_swarm,
+ "join": self.join,
+ "absent": self.leave,
+ "remove": self.remove,
+ "inspect": self.inspect_swarm
+ }
+
+ if self.state == 'inspect':
+ self.client.module.deprecate(
+ "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster",
+ version='2.12')
+
+ choice_map.get(self.state)()
+
+ if self.client.module._diff or self.parameters.debug:
+ diff = dict()
+ diff['before'], diff['after'] = self.differences.get_before_after()
+ self.results['diff'] = diff
+
+ def inspect_swarm(self):
+ try:
+ data = self.client.inspect_swarm()
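+            # Round-trip through json to normalise the SDK response into a
+            # plain dict that is safe to return as an Ansible fact.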
+ json_str = json.dumps(data, ensure_ascii=False)
+ self.swarm_info = json.loads(json_str)
+
+ self.results['changed'] = False
+ self.results['swarm_facts'] = self.swarm_info
+
+ unlock_key = self.get_unlock_key()
+ self.swarm_info.update(unlock_key)
+ except APIError:
+ return
+
+ def get_unlock_key(self):
+ default = {'UnlockKey': None}
+ if not self.has_swarm_lock_changed():
+ return default
+ try:
+ return self.client.get_unlock_key() or default
+ except APIError:
+ return default
+
+ def has_swarm_lock_changed(self):
+ return self.parameters.autolock_managers and (
+ self.created or self.differences.has_difference_for('autolock_managers')
+ )
+
+ def init_swarm(self):
+ if not self.force and self.client.check_if_swarm_manager():
+ self.__update_swarm()
+ return
+
+ if not self.check_mode:
+ init_arguments = {
+ 'advertise_addr': self.parameters.advertise_addr,
+ 'listen_addr': self.parameters.listen_addr,
+ 'force_new_cluster': self.force,
+ 'swarm_spec': self.parameters.spec,
+ }
+ if self.parameters.default_addr_pool is not None:
+ init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+ if self.parameters.subnet_size is not None:
+ init_arguments['subnet_size'] = self.parameters.subnet_size
+ try:
+ self.client.init_swarm(**init_arguments)
+ except APIError as exc:
+ self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
+
+ if not self.client.check_if_swarm_manager():
+ if not self.check_mode:
+ self.client.fail("Swarm not created or other error!")
+
+ self.created = True
+ self.inspect_swarm()
+ self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+ self.differences.add('state', parameter='present', active='absent')
+ self.results['changed'] = True
+ self.results['swarm_facts'] = {
+ 'JoinTokens': self.swarm_info.get('JoinTokens'),
+ 'UnlockKey': self.swarm_info.get('UnlockKey')
+ }
+
+ def __update_swarm(self):
+ try:
+ self.inspect_swarm()
+ version = self.swarm_info['Version']['Index']
+ self.parameters.update_from_swarm_info(self.swarm_info)
+ old_parameters = TaskParameters()
+ old_parameters.update_from_swarm_info(self.swarm_info)
+ self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+ if self.differences.empty:
+ self.results['actions'].append("No modification")
+ self.results['changed'] = False
+ return
+ update_parameters = TaskParameters.from_ansible_params(self.client)
+ update_parameters.update_parameters(self.client)
+ if not self.check_mode:
+ self.client.update_swarm(
+ version=version, swarm_spec=update_parameters.spec,
+ rotate_worker_token=self.parameters.rotate_worker_token,
+ rotate_manager_token=self.parameters.rotate_manager_token)
+ except APIError as exc:
+ self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
+ return
+
+ self.inspect_swarm()
+ self.results['actions'].append("Swarm cluster updated")
+ self.results['changed'] = True
+
+ def join(self):
+ if self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is already part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.join_swarm(
+ remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+ listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
+ except APIError as exc:
+ self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("New node is added to swarm cluster")
+ self.differences.add('joined', parameter=True, active=False)
+ self.results['changed'] = True
+
+ def leave(self):
+ if not self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is not part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.leave_swarm(force=self.force)
+ except APIError as exc:
+ self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node has left the swarm cluster")
+ self.differences.add('joined', parameter='absent', active='present')
+ self.results['changed'] = True
+
+ def remove(self):
+ if not self.client.check_if_swarm_manager():
+ self.client.fail("This node is not a manager.")
+
+ try:
+ status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+ except APIError:
+ return
+
+ if not status_down:
+ self.client.fail("Can not remove the node. The status node is ready and not down.")
+
+ if not self.check_mode:
+ try:
+ self.client.remove_node(node_id=self.node_id, force=self.force)
+ except APIError as exc:
+ self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node is removed from swarm cluster.")
+ self.differences.add('joined', parameter=False, active=True)
+ self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+ return client.module.params['state'] == 'remove'
+
+
+def main():
+ argument_spec = dict(
+ advertise_addr=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
+ force=dict(type='bool', default=False),
+ listen_addr=dict(type='str', default='0.0.0.0:2377'),
+ remote_addrs=dict(type='list', elements='str'),
+ join_token=dict(type='str'),
+ snapshot_interval=dict(type='int'),
+ task_history_retention_limit=dict(type='int'),
+ keep_old_snapshots=dict(type='int'),
+ log_entries_for_slow_followers=dict(type='int'),
+ heartbeat_tick=dict(type='int'),
+ election_tick=dict(type='int'),
+ dispatcher_heartbeat_period=dict(type='int'),
+ node_cert_expiry=dict(type='int'),
+ name=dict(type='str'),
+ labels=dict(type='dict'),
+ signing_ca_cert=dict(type='str'),
+ signing_ca_key=dict(type='str'),
+ ca_force_rotate=dict(type='int'),
+ autolock_managers=dict(type='bool'),
+ node_id=dict(type='str'),
+ rotate_worker_token=dict(type='bool', default=False),
+ rotate_manager_token=dict(type='bool', default=False),
+ default_addr_pool=dict(type='list', elements='str'),
+ subnet_size=dict(type='int'),
+ )
+
+ required_if = [
+ ('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']),
+ ('state', 'remove', ['node_id'])
+ ]
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+ signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ autolock_managers=dict(docker_py_version='2.6.0'),
+ log_driver=dict(docker_py_version='2.6.0'),
+ remove_operation=dict(
+ docker_py_version='2.4.0',
+ detect_usage=_detect_remove_operation,
+ usage_msg='remove swarm nodes'
+ ),
+ default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.25',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ result='',
+ actions=[]
+ )
+
+ SwarmManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/flatpak_remote.py b/test/support/integration/plugins/modules/flatpak_remote.py
new file mode 100644
index 0000000000..db208f1bd9
--- /dev/null
+++ b/test/support/integration/plugins/modules/flatpak_remote.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: flatpak_remote
+version_added: '2.6'
+short_description: Manage flatpak repository remotes
+description:
+- Allows users to add or remove flatpak remotes.
+- The flatpak remotes concept is comparable to what is called repositories in other packaging
+ formats.
+- Currently, remote addition is only supported via I(flatpakrepo) file URLs.
+- Existing remotes will not be updated.
+- See the M(flatpak) module for managing flatpaks.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ default: flatpak
+ flatpakrepo_url:
+ description:
+ - The URL to the I(flatpakrepo) file representing the repository remote to add.
+ - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
+ is added using the specified installation C(method).
+ - When used with I(state=absent), this is not required.
+ - Required when I(state=present).
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The desired name for the flatpak remote to be registered under on the managed host.
+ - When used with I(state=present), the remote will be added to the managed host under
+ the specified I(name).
+ - When used with I(state=absent) the remote with that name will be removed.
+ required: true
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Add the Gnome flatpak remote to the system installation
+ flatpak_remote:
+ name: gnome
+ state: present
+ flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo
+
+- name: Add the flathub flatpak repository remote to the user installation
+ flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: user
+
+- name: Remove the Gnome flatpak remote from the user installation
+ flatpak_remote:
+ name: gnome
+ state: absent
+ method: user
+
+- name: Remove the flathub remote from the system installation
+ flatpak_remote:
+ name: flathub
+ state: absent
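+
+# A hedged sketch using the executable option; the path is illustrative:
+- name: Add the flathub remote via a specific flatpak executable
+  flatpak_remote:
+    name: flathub
+    state: present
+    flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+    executable: /usr/local/bin/flatpak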
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
+'''
+
+import subprocess
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def add_remote(module, binary, name, flatpakrepo_url, method):
+ """Add a new remote."""
+ global result
+ command = "{0} remote-add --{1} {2} {3}".format(
+ binary, method, name, flatpakrepo_url)
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remove_remote(module, binary, name, method):
+ """Remove an existing remote."""
+ global result
+ command = "{0} remote-delete --{1} --force {2} ".format(
+ binary, method, name)
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remote_exists(module, binary, name, method):
+ """Check if the remote exists."""
+ command = "{0} remote-list -d --{1}".format(binary, method)
+ # The query operation for the remote needs to be run even in check mode
+ output = _flatpak_command(module, False, command)
+ for line in output.splitlines():
+ listed_remote = line.split()
+ if len(listed_remote) == 0:
+ continue
+ if listed_remote[0] == to_native(name):
+ return True
+ return False
+
+
+def _flatpak_command(module, noop, command):
+ global result
+ if noop:
+ result['rc'] = 0
+ result['command'] = command
+ return ""
+
+ process = subprocess.Popen(
+ command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout_data, stderr_data = process.communicate()
+ result['rc'] = process.returncode
+ result['command'] = command
+    result['stdout'] = to_native(stdout_data)
+    result['stderr'] = to_native(stderr_data)
+ if result['rc'] != 0:
+ module.fail_json(msg="Failed to execute flatpak command", **result)
+ return to_native(stdout_data)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ flatpakrepo_url=dict(type='str'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default="present",
+ choices=['absent', 'present']),
+ executable=dict(type='str', default="flatpak")
+ ),
+ # This module supports check mode
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ flatpakrepo_url = module.params['flatpakrepo_url']
+ method = module.params['method']
+ state = module.params['state']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ if flatpakrepo_url is None:
+ flatpakrepo_url = ''
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
+
+ if state == 'present' and not remote_already_exists:
+ add_remote(module, binary, name, flatpakrepo_url, method)
+ elif state == 'absent' and remote_already_exists:
+ remove_remote(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/locale_gen.py b/test/support/integration/plugins/modules/locale_gen.py
new file mode 100644
index 0000000000..4968b834af
--- /dev/null
+++ b/test/support/integration/plugins/modules/locale_gen.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+version_added: "1.6"
+author:
+- Augustus Kling (@AugustusKling)
+options:
+ name:
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ state:
+ description:
+ - Whether the locale shall be present.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Ensure a locale exists
+ locale_gen:
+ name: de_CH.UTF-8
+ state: present
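+
+# A companion sketch for the absent state:
+- name: Ensure a locale is absent
+  locale_gen:
+    name: de_CH.UTF-8
+    state: absent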
+'''
+
+import os
+import re
+from subprocess import Popen, PIPE, call
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+
+# ===========================================
+# locale_gen module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+ """Check if the given locale is available on the system. This is done by
+ checking either :
+    * if the locale is present in /etc/locale.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ if ubuntuMode:
+ __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/etc/locale.gen'
+
+ re_compiled = re.compile(__regexp)
+    with open(__locales_available, 'r') as fd:
+        for line in fd:
+            result = re_compiled.match(line)
+            if result and result.group('locale') == name:
+                return True
+    return False
+
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ output = to_native(output)
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ for s, r in LOCALE_NORMALIZATION.items():
+ name = name.replace(s, r)
+ return name
+
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+    with open("/etc/locale.gen", "r") as f:
+        lines = [line.replace(existing_line, new_line) for line in f]
+    with open("/etc/locale.gen", "w") as f:
+        f.write("".join(lines))
+
+
+def set_locale(name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
+ search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
+ else:
+ new_string = r'# %s \g<charset>' % (name)
+ with open("/etc/locale.gen", "r") as f:
+ lines = [re.sub(search_string, new_string, line) for line in f]
+ with open("/etc/locale.gen", "w") as f:
+ f.write("".join(lines))
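+
+# Illustrative sketch of the rewrite above (assumed name "en_GB.UTF-8"):
+#   "# en_GB.UTF-8 UTF-8" becomes "en_GB.UTF-8 UTF-8"   when enabled=True,
+#   "en_GB.UTF-8 UTF-8"   becomes "# en_GB.UTF-8 UTF-8" when enabled=False.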
+
+
+def apply_change(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ set_locale(name, enabled=True)
+ else:
+ # Delete locale.
+ set_locale(name, enabled=False)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue != 0:
+ raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute; it returned " + str(localeGenExitValue))
+
+
+def apply_change_ubuntu(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Deleting a locale involves discarding it from /var/lib/locales/supported.d/local and regenerating all locales.
+ with open("/var/lib/locales/supported.d/local", "r") as f:
+ content = f.readlines()
+ with open("/var/lib/locales/supported.d/local", "w") as f:
+ for line in content:
+ locale, charset = line.split(' ')
+ if locale != name:
+ f.write(line)
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales we want to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue != 0:
+ raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute; it returned " + str(localeGenExitValue))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ if not os.path.exists("/etc/locale.gen"):
+ if os.path.exists("/var/lib/locales/supported.d/"):
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+ else:
+ # We found the common way to manage locales.
+ ubuntuMode = False
+
+ if not is_available(name, ubuntuMode):
+ module.fail_json(msg="The locale you've entered is not available "
+ "on your system.")
+
+ if is_present(name):
+ prev_state = "present"
+ else:
+ prev_state = "absent"
+ changed = (prev_state != state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ if changed:
+ try:
+ if ubuntuMode is False:
+ apply_change(state, name)
+ else:
+ apply_change_ubuntu(state, name)
+ except EnvironmentError as e:
+ module.fail_json(msg=to_native(e), exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/lvg.py b/test/support/integration/plugins/modules/lvg.py
new file mode 100644
index 0000000000..e2035f688d
--- /dev/null
+++ b/test/support/integration/plugins/modules/lvg.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: lvg
+short_description: Configure LVM volume groups
+description:
+ - This module creates, removes or resizes volume groups.
+version_added: "1.1"
+options:
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical volumes in this volume group.
+ - Required when creating or resizing a volume group.
+ - The module will take care of running pvcreate if needed.
+ type: list
+ pesize:
+ description:
+ - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
+ (where the sector size is the largest sector size of the PVs currently used in the VG),
+ or at least 128KiB."
+ - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ type: str
+ default: "4"
+ pv_options:
+ description:
+ - Additional options to pass to C(pvcreate) when creating the volume group.
+ type: str
+ version_added: "2.4"
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ type: str
+ version_added: "1.6"
+ state:
+ description:
+ - Control if the volume group exists.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ force:
+ description:
+ - If C(yes), allows removal of a volume group even if it contains logical volumes.
+ type: bool
+ default: no
+seealso:
+- module: filesystem
+- module: lvol
+- module: parted
+notes:
+ - This module does not modify the PE size of an already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
+ lvg:
+ vg: vg.services
+ pvs: /dev/sda1
+ pesize: 32
+
+- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
+ lvg:
+ vg: vg.services
+ pvs: /dev/sdb
+ pesize: 128K
+
+# If, for example, we already have VG vg.services on top of /dev/sdb1,
+# this VG will be extended by /dev/sdc5. Or if vg.services was created on
+# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
+# and then reduce by /dev/sda5.
+- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
+ lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
+
+- name: Remove a volume group with name vg.services
+ lvg:
+ vg: vg.services
+ state: absent
+'''
+
+import itertools
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'pv_count': int(parts[1]),
+ 'lv_count': int(parts[2]),
+ })
+ return vgs
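+
+# Illustrative input (assumed values): one line of
+#   vgs --noheadings -o vg_name,pv_count,lv_count --separator ';'
+# such as "  vg.services;2;1", which parses to
+#   {'name': 'vg.services', 'pv_count': 2, 'lv_count': 1}.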
+
+
+def find_mapper_device_name(module, dm_device):
+ dmsetup_cmd = module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc != 0:
+ module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+
+def parse_pvs(module, data):
+ pvs = []
+ dm_prefix = '/dev/dm-'
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ if parts[0].startswith(dm_prefix):
+ parts[0] = find_mapper_device_name(module, parts[0])
+ pvs.append({
+ 'name': parts[0],
+ 'vg_name': parts[1],
+ })
+ return pvs
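+
+# Illustrative input (assumed values): one line of
+#   pvs --noheadings -o pv_name,vg_name --separator ';'
+# such as "  /dev/sdb1;vg.services", which parses to
+#   {'name': '/dev/sdb1', 'vg_name': 'vg.services'}; /dev/dm-* device names
+# are first resolved to their /dev/mapper/* names via dmsetup.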
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ pvs=dict(type='list'),
+ pesize=dict(type='str', default='4'),
+ pv_options=dict(type='str', default=''),
+ vg_options=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ pesize = module.params['pesize']
+ pvoptions = module.params['pv_options'].split()
+ vgoptions = module.params['vg_options'].split()
+
+ dev_list = []
+ if module.params['pvs']:
+ dev_list = list(module.params['pvs'])
+ elif state == 'present':
+ module.fail_json(msg="No physical volumes given.")
+
+ # LVM always uses real paths not symlinks so replace symlinks with actual path
+ for idx, dev in enumerate(dev_list):
+ dev_list[idx] = os.path.realpath(dev)
+
+ if state == 'present':
+ # check given devices
+ for test_dev in dev_list:
+ if not os.path.exists(test_dev):
+ module.fail_json(msg="Device %s not found." % test_dev)
+
+ # get pv list
+ pvs_cmd = module.get_bin_path('pvs', True)
+ if dev_list:
+ pvs_filter_pv_name = ' || '.join(
+ 'pv_name = {0}'.format(x)
+ for x in itertools.chain(dev_list, module.params['pvs'])
+ )
+ pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
+ pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
+ else:
+ pvs_filter = ''
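+ # Illustrative filter (assumed names): with pvs=/dev/sdb1 and vg=vg.services
+ # this becomes roughly --select 'pv_name = /dev/sdb1 || ... || vg_name = vg.services',
+ # listing each device both as its resolved real path and as given.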
+ rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
+
+ # check pv for devices
+ pvs = parse_pvs(module, current_pvs)
+ used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
+ if used_pvs:
+ module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
+
+ vgs_cmd = module.get_bin_path('vgs', True)
+ rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
+
+ changed = False
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ if this_vg is None:
+ if state == 'present':
+ # create VG
+ if module.check_mode:
+ changed = True
+ else:
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in dev_list:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ vgcreate_cmd = module.get_bin_path('vgcreate', True)
+ rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ if this_vg['lv_count'] == 0 or force:
+ # remove VG
+ vgremove_cmd = module.get_bin_path('vgremove', True)
+ rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
+ else:
+ module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))
+
+ # resize VG
+ current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ # add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+ # remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/mongodb_parameter.py b/test/support/integration/plugins/modules/mongodb_parameter.py
new file mode 100644
index 0000000000..05de42b2ea
--- /dev/null
+++ b/test/support/integration/plugins/modules/mongodb_parameter.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: mongodb_parameter
+short_description: Change an administrative parameter on a MongoDB server
+description:
+ - Change an administrative parameter on a MongoDB server.
+version_added: "2.1"
+options:
+ login_user:
+ description:
+ - The MongoDB username used to authenticate with.
+ type: str
+ login_password:
+ description:
+ - The login user's password used to authenticate with.
+ type: str
+ login_host:
+ description:
+ - The host running the database.
+ type: str
+ default: localhost
+ login_port:
+ description:
+ - The MongoDB port to connect to.
+ default: 27017
+ type: int
+ login_database:
+ description:
+ - The database where login credentials are stored.
+ type: str
+ replica_set:
+ description:
+ - Replica set to connect to (automatically connects to primary for writes).
+ type: str
+ ssl:
+ description:
+ - Whether to use an SSL connection when connecting to the database.
+ type: bool
+ default: no
+ param:
+ description:
+ - MongoDB administrative parameter to modify.
+ type: str
+ required: true
+ value:
+ description:
+ - MongoDB administrative parameter value to set.
+ type: str
+ required: true
+ param_type:
+ description:
+ - Define the type of parameter value.
+ default: str
+ type: str
+ choices: [int, str]
+
+notes:
+ - Requires the pymongo Python package on the remote host, version 2.4.2+.
+ - This can be installed using pip or the OS package manager.
+ - See also U(http://api.mongodb.org/python/current/installation.html)
+requirements: [ "pymongo" ]
+author: "Loic Blot (@nerzhul)"
+'''
+
+EXAMPLES = r'''
+- name: Set MongoDB syncdelay to 60 (this is an int)
+ mongodb_parameter:
+ param: syncdelay
+ value: 60
+ param_type: int
+'''
+
+RETURN = r'''
+before:
+ description: value before modification
+ returned: success
+ type: str
+after:
+ description: value after modification
+ returned: success
+ type: str
+'''
+
+import os
+import traceback
+
+try:
+ from pymongo.errors import ConnectionFailure
+ from pymongo.errors import OperationFailure
+ from pymongo import version as PyMongoVersion
+ from pymongo import MongoClient
+except ImportError:
+ try: # for older PyMongo 2.2
+ from pymongo import Connection as MongoClient
+ except ImportError:
+ pymongo_found = False
+ else:
+ pymongo_found = True
+else:
+ pymongo_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_native
+
+
+# =========================================
+# MongoDB module specific support methods.
+#
+
+def load_mongocnf():
+ config = configparser.RawConfigParser()
+ mongocnf = os.path.expanduser('~/.mongodb.cnf')
+
+ try:
+ config.readfp(open(mongocnf))
+ creds = dict(
+ user=config.get('client', 'user'),
+ password=config.get('client', 'pass')
+ )
+ except (configparser.NoOptionError, IOError):
+ return False
+
+ return creds
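+
+# Illustrative ~/.mongodb.cnf layout read above (assumed values):
+#   [client]
+#   user = admin
+#   pass = secret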
+
+
+# =========================================
+# Module execution.
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(default=None),
+ login_password=dict(default=None, no_log=True),
+ login_host=dict(default='localhost'),
+ login_port=dict(default=27017, type='int'),
+ login_database=dict(default=None),
+ replica_set=dict(default=None),
+ param=dict(required=True),
+ value=dict(required=True),
+ param_type=dict(default="str", choices=['str', 'int']),
+ ssl=dict(default=False, type='bool'),
+ )
+ )
+
+ if not pymongo_found:
+ module.fail_json(msg=missing_required_lib('pymongo'))
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_database = module.params['login_database']
+
+ replica_set = module.params['replica_set']
+ ssl = module.params['ssl']
+
+ param = module.params['param']
+ param_type = module.params['param_type']
+ value = module.params['value']
+
+ # Verify parameter is coherent with specified type
+ try:
+ if param_type == 'int':
+ value = int(value)
+ except ValueError:
+ module.fail_json(msg="value '%s' is not %s" % (value, param_type))
+
+ try:
+ if replica_set:
+ client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
+ else:
+ client = MongoClient(login_host, int(login_port), ssl=ssl)
+
+ if login_user is None and login_password is None:
+ mongocnf_creds = load_mongocnf()
+ if mongocnf_creds is not False:
+ login_user = mongocnf_creds['user']
+ login_password = mongocnf_creds['password']
+ elif login_password is None or login_user is None:
+ module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
+
+ if login_user is not None and login_password is not None:
+ client.admin.authenticate(login_user, login_password, source=login_database)
+
+ except ConnectionFailure as e:
+ module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
+
+ db = client.admin
+
+ try:
+ after_value = db.command("setParameter", **{param: value})
+ except OperationFailure as e:
+ module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc())
+
+ if "was" not in after_value:
+ module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
+ else:
+ module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
+ after=value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/mongodb_user.py b/test/support/integration/plugins/modules/mongodb_user.py
new file mode 100644
index 0000000000..362b3aa45e
--- /dev/null
+++ b/test/support/integration/plugins/modules/mongodb_user.py
@@ -0,0 +1,474 @@
+#!/usr/bin/python
+
+# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
+# Sponsored by Four Kitchens http://fourkitchens.com.
+# (c) 2014, Epic Games, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: mongodb_user
+short_description: Adds or removes a user from a MongoDB database
+description:
+ - Adds or removes a user from a MongoDB database.
+version_added: "1.1"
+options:
+ login_user:
+ description:
+ - The MongoDB username used to authenticate with.
+ type: str
+ login_password:
+ description:
+ - The login user's password used to authenticate with.
+ type: str
+ login_host:
+ description:
+ - The host running the database.
+ default: localhost
+ type: str
+ login_port:
+ description:
+ - The MongoDB port to connect to.
+ default: '27017'
+ type: str
+ login_database:
+ version_added: "2.0"
+ description:
+ - The database where login credentials are stored.
+ type: str
+ replica_set:
+ version_added: "1.6"
+ description:
+ - Replica set to connect to (automatically connects to primary for writes).
+ type: str
+ database:
+ description:
+ - The name of the database to add/remove the user from.
+ required: true
+ type: str
+ aliases: [db]
+ name:
+ description:
+ - The name of the user to add or remove.
+ required: true
+ aliases: [user]
+ type: str
+ password:
+ description:
+ - The password to use for the user.
+ type: str
+ aliases: [pass]
+ ssl:
+ version_added: "1.8"
+ description:
+ - Whether to use an SSL connection when connecting to the database.
+ type: bool
+ ssl_cert_reqs:
+ version_added: "2.2"
+ description:
+ - Specifies whether a certificate is required from the other side of the connection,
+ and whether it will be validated if provided.
+ default: CERT_REQUIRED
+ choices: [CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED]
+ type: str
+ roles:
+ version_added: "1.3"
+ type: list
+ elements: raw
+ description:
+ - >
+ The database user roles. Valid values are one or more of the following strings:
+ 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
+ 'dbAdminAnyDatabase'
+ - "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
+ - "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
+ state:
+ description:
+ - The database user state.
+ default: present
+ choices: [absent, present]
+ type: str
+ update_password:
+ default: always
+ choices: [always, on_create]
+ version_added: "2.1"
+ description:
+ - C(always) will update passwords if they differ.
+ - C(on_create) will only set the password for newly created users.
+ type: str
+
+notes:
+ - Requires the pymongo Python package on the remote host, version 2.4.2+. This
+ can be installed using pip or the OS package manager. See U(http://api.mongodb.org/python/current/installation.html).
+requirements: [ "pymongo" ]
+author:
+ - "Elliott Foster (@elliotttf)"
+ - "Julien Thebault (@Lujeni)"
+'''
+
+EXAMPLES = '''
+- name: Create 'burgers' database user with name 'bob' and password '12345'.
+ mongodb_user:
+ database: burgers
+ name: bob
+ password: 12345
+ state: present
+
+- name: Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
+ mongodb_user:
+ database: burgers
+ name: bob
+ password: 12345
+ state: present
+ ssl: True
+
+- name: Delete 'burgers' database user with name 'bob'.
+ mongodb_user:
+ database: burgers
+ name: bob
+ state: absent
+
+- name: Define more users with various specific roles (if not defined, no roles are assigned, and the user is added via the pre-MongoDB-2.2 style)
+ mongodb_user:
+ database: burgers
+ name: ben
+ password: 12345
+ roles: read
+ state: present
+
+- name: Define roles
+ mongodb_user:
+ database: burgers
+ name: jim
+ password: 12345
+ roles: readWrite,dbAdmin,userAdmin
+ state: present
+
+- name: Define roles
+ mongodb_user:
+ database: burgers
+ name: joe
+ password: 12345
+ roles: readWriteAnyDatabase
+ state: present
+
+- name: Add a user to database in a replica set, the primary server is automatically discovered and written to
+ mongodb_user:
+ database: burgers
+ name: bob
+ replica_set: belcher
+ password: 12345
+ roles: readWriteAnyDatabase
+ state: present
+
+# Add a user 'oplog_reader' with read-only access to the 'local' database on the replica set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
+# Note that the credentials must be added to the 'admin' database because the 'local' database is not synchronized and cannot receive user credentials.
+# To log in with such a user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
+# This syntax requires mongodb 2.6+ and pymongo 2.5+
+- name: Roles as a dictionary
+ mongodb_user:
+ login_user: root
+ login_password: root_password
+ database: admin
+ user: oplog_reader
+ password: oplog_reader_password
+ state: present
+ replica_set: belcher
+ roles:
+ - db: local
+ role: read
+
+'''
+
+RETURN = '''
+user:
+ description: The name of the user to add or remove.
+ returned: success
+ type: str
+'''
+
+import os
+import ssl as ssl_lib
+import traceback
+from distutils.version import LooseVersion
+from operator import itemgetter
+
+try:
+ from pymongo.errors import ConnectionFailure
+ from pymongo.errors import OperationFailure
+ from pymongo import version as PyMongoVersion
+ from pymongo import MongoClient
+except ImportError:
+ try: # for older PyMongo 2.2
+ from pymongo import Connection as MongoClient
+ except ImportError:
+ pymongo_found = False
+ else:
+ pymongo_found = True
+else:
+ pymongo_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_native
+
+
+# =========================================
+# MongoDB module specific support methods.
+#
+
+def check_compatibility(module, client):
+ """Check the compatibility between the driver and the database.
+
+ See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
+
+ Args:
+ module: Ansible module.
+ client (cursor): Mongodb cursor on admin database.
+ """
+ loose_srv_version = LooseVersion(client.server_info()['version'])
+ loose_driver_version = LooseVersion(PyMongoVersion)
+
+ if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
+ module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
+
+ elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
+ module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
+
+ elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
+ module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
+
+ elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
+ module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
+
+
+def user_find(client, user, db_name):
+ """Check if the user exists.
+
+ Args:
+ client (cursor): Mongodb cursor on admin database.
+ user (str): User to check.
+ db_name (str): User's database.
+
+ Returns:
+ dict: the user document when the user exists, False otherwise.
+ """
+ for mongo_user in client["admin"].system.users.find():
+ if mongo_user['user'] == user:
+ # NOTE: there is no 'db' field in mongo 2.4.
+ if 'db' not in mongo_user:
+ return mongo_user
+
+ if mongo_user["db"] == db_name:
+ return mongo_user
+ return False
+
+
+def user_add(module, client, db_name, user, password, roles):
+ # pymongo's add_user is a _create_or_update_user under the hood, so we won't know if the user was created or updated
+ # without reproducing a lot of the logic in database.py of pymongo
+ db = client[db_name]
+
+ if roles is None:
+ db.add_user(user, password, False)
+ else:
+ db.add_user(user, password, None, roles=roles)
+
+
+def user_remove(module, client, db_name, user):
+ exists = user_find(client, user, db_name)
+ if exists:
+ if module.check_mode:
+ module.exit_json(changed=True, user=user)
+ db = client[db_name]
+ db.remove_user(user)
+ else:
+ module.exit_json(changed=False, user=user)
+
+
+def load_mongocnf():
+ config = configparser.RawConfigParser()
+ mongocnf = os.path.expanduser('~/.mongodb.cnf')
+
+ try:
+ config.readfp(open(mongocnf))
+ creds = dict(
+ user=config.get('client', 'user'),
+ password=config.get('client', 'pass')
+ )
+ except (configparser.NoOptionError, IOError):
+ return False
+
+ return creds
+
+
+def check_if_roles_changed(uinfo, roles, db_name):
+ # We must be aware of users which can read the oplog on a replicaset
+ # Such users must have access to the local DB, but since this DB does not store users credentials
+ # and is not synchronized among replica sets, the user must be stored on the admin db
+ # Therefore their structure is the following:
+ # {
+ # "_id" : "admin.oplog_reader",
+ # "user" : "oplog_reader",
+ # "db" : "admin", # <-- admin DB
+ # "roles" : [
+ # {
+ # "role" : "read",
+ # "db" : "local" # <-- local DB
+ # }
+ # ]
+ # }
+
+ def make_sure_roles_are_a_list_of_dict(roles, db_name):
+ output = list()
+ for role in roles:
+ if isinstance(role, (binary_type, text_type)):
+ new_role = {"role": role, "db": db_name}
+ output.append(new_role)
+ else:
+ output.append(role)
+ return output
+
+ roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
+ uinfo_roles = uinfo.get('roles', [])
+
+ if sorted(roles_as_list_of_dict, key=itemgetter('db')) == sorted(uinfo_roles, key=itemgetter('db')):
+ return False
+ return True
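+
+# Illustrative normalization (assumed values): for db_name="burgers",
+#   roles=["read", {"role": "readWrite", "db": "other"}]
+# becomes
+#   [{"role": "read", "db": "burgers"}, {"role": "readWrite", "db": "other"}]
+# before being compared with the roles stored in the user info.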
+
+
+# =========================================
+# Module execution.
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(default=None),
+ login_password=dict(default=None, no_log=True),
+ login_host=dict(default='localhost'),
+ login_port=dict(default='27017'),
+ login_database=dict(default=None),
+ replica_set=dict(default=None),
+ database=dict(required=True, aliases=['db']),
+ name=dict(required=True, aliases=['user']),
+ password=dict(aliases=['pass'], no_log=True),
+ ssl=dict(default=False, type='bool'),
+ roles=dict(default=None, type='list', elements='raw'),
+ state=dict(default='present', choices=['absent', 'present']),
+ update_password=dict(default="always", choices=["always", "on_create"]),
+ ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not pymongo_found:
+ module.fail_json(msg=missing_required_lib('pymongo'))
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_database = module.params['login_database']
+
+ replica_set = module.params['replica_set']
+ db_name = module.params['database']
+ user = module.params['name']
+ password = module.params['password']
+ ssl = module.params['ssl']
+ roles = module.params['roles'] or []
+ state = module.params['state']
+ update_password = module.params['update_password']
+
+ try:
+ connection_params = {
+ "host": login_host,
+ "port": int(login_port),
+ }
+
+ if replica_set:
+ connection_params["replicaset"] = replica_set
+
+ if ssl:
+ connection_params["ssl"] = ssl
+ connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
+
+ client = MongoClient(**connection_params)
+
+ # NOTE: this check must be done ASAP.
+ # We don't need to be authenticated (this ability was lost in PyMongo 3.6)
+ if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
+ check_compatibility(module, client)
+
+ if login_user is None and login_password is None:
+ mongocnf_creds = load_mongocnf()
+ if mongocnf_creds is not False:
+ login_user = mongocnf_creds['user']
+ login_password = mongocnf_creds['password']
+ elif login_password is None or login_user is None:
+ module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
+
+ if login_user is not None and login_password is not None:
+ client.admin.authenticate(login_user, login_password, source=login_database)
+ elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
+ if db_name != "admin":
+ module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
+ # else: this has to be the first admin user added
+
+ except Exception as e:
+ module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
+
+ if state == 'present':
+ if password is None and update_password == 'always':
+ module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
+
+ try:
+ if update_password != 'always':
+ uinfo = user_find(client, user, db_name)
+ if uinfo:
+ password = None
+ if not check_if_roles_changed(uinfo, roles, db_name):
+ module.exit_json(changed=False, user=user)
+
+ if module.check_mode:
+ module.exit_json(changed=True, user=user)
+
+ user_add(module, client, db_name, user, password, roles)
+ except Exception as e:
+ module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
+ finally:
+ try:
+ client.close()
+ except Exception:
+ pass
+ # Here we could check for a password change if mongo provided a query for that: https://jira.mongodb.org/browse/SERVER-22848
+ # newuinfo = user_find(client, user, db_name)
+ # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
+ # module.exit_json(changed=False, user=user)
+
+ elif state == 'absent':
+ try:
+ user_remove(module, client, db_name, user)
+ except Exception as e:
+ module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
+ finally:
+ try:
+ client.close()
+ except Exception:
+ pass
+ module.exit_json(changed=True, user=user)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/mysql_db.py b/test/support/integration/plugins/modules/mysql_db.py
new file mode 100644
index 0000000000..58bcca7832
--- /dev/null
+++ b/test/support/integration/plugins/modules/mysql_db.py
@@ -0,0 +1,617 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
+# Sponsored by Four Kitchens http://fourkitchens.com.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: mysql_db
+short_description: Add or remove MySQL databases from a remote host
+description:
+- Add or remove MySQL databases from a remote host.
+version_added: '0.6'
+options:
+ name:
+ description:
+ - Name of the database to add or remove.
+ - I(name=all) may only be provided if I(state) is C(dump) or C(import).
+ - A list of databases can be provided with I(state=dump), I(state=present) and I(state=absent).
+ - If I(name=all), it works like the C(--all-databases) option of mysqldump (added in 2.0).
+ required: true
+ type: list
+ elements: str
+ aliases: [db]
+ state:
+ description:
+ - The database state.
+ type: str
+ default: present
+ choices: ['absent', 'dump', 'import', 'present']
+ collation:
+ description:
+ - Collation mode (sorting). This only applies to new tables/databases and
+ does not update existing ones; this is a limitation of MySQL.
+ type: str
+ default: ''
+ encoding:
+ description:
+ - Encoding mode to use, for example C(utf8) or C(latin1_swedish_ci),
+ when creating a database, or when dumping or importing an SQL script.
+ type: str
+ default: ''
+ target:
+ description:
+ - Location, on the remote host, of the dump file to read from or write to.
+ - Uncompressed SQL files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and
+ xz (Added in 2.0) compressed files are supported.
+ type: path
+ single_transaction:
+ description:
+ - Execute the dump in a single transaction.
+ type: bool
+ default: no
+ version_added: '2.1'
+ quick:
+ description:
+ - Option used for dumping large tables.
+ type: bool
+ default: yes
+ version_added: '2.1'
+ ignore_tables:
+ description:
+ - A list of table names that will be ignored in the dump
+ of the form database_name.table_name.
+ type: list
+ elements: str
+ required: no
+ default: []
+ version_added: '2.7'
+ hex_blob:
+ description:
+ - Dump binary columns using hexadecimal notation.
+ required: no
+ default: no
+ type: bool
+ version_added: '2.10'
+ force:
+ description:
+ - Continue dump or import even if we get an SQL error.
+ - Used only when I(state) is C(dump) or C(import).
+ required: no
+ type: bool
+ default: no
+ version_added: '2.10'
+ master_data:
+ description:
+ - Option to dump a master replication server to produce a dump file
+ that can be used to set up another server as a slave of the master.
+ - C(0) to not include master data.
+ - C(1) to generate a 'CHANGE MASTER TO' statement
+ required on the slave to start the replication process.
+ - C(2) to generate a commented 'CHANGE MASTER TO'.
+ - Can be used when I(state=dump).
+ required: no
+ type: int
+ choices: [0, 1, 2]
+ default: 0
+ version_added: '2.10'
+ skip_lock_tables:
+ description:
+ - Skip locking tables for read. Used when I(state=dump), ignored otherwise.
+ required: no
+ type: bool
+ default: no
+ version_added: '2.10'
+ dump_extra_args:
+ description:
+ - Provide additional arguments for mysqldump.
+ Used when I(state=dump) only, ignored otherwise.
+ required: no
+ type: str
+ version_added: '2.10'
+seealso:
+- module: mysql_info
+- module: mysql_variables
+- module: mysql_user
+- module: mysql_replication
+- name: MySQL command-line client reference
+ description: Complete reference of the MySQL command-line client documentation.
+ link: https://dev.mysql.com/doc/refman/8.0/en/mysql.html
+- name: mysqldump reference
+ description: Complete reference of the ``mysqldump`` client utility documentation.
+ link: https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://dev.mysql.com/doc/refman/8.0/en/create-database.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://dev.mysql.com/doc/refman/8.0/en/drop-database.html
+author: "Ansible Core Team"
+requirements:
+ - mysql (command line binary)
+ - mysqldump (command line binary)
+notes:
+ - Requires the mysql and mysqldump binaries on the remote host.
+ - This module is B(not idempotent) when I(state) is C(import),
+ and will import the dump file each time if run more than once.
+extends_documentation_fragment: mysql
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name 'bobdata'
+ mysql_db:
+ name: bobdata
+ state: present
+
+- name: Create new databases with names 'foo' and 'bar'
+ mysql_db:
+ name:
+ - foo
+ - bar
+ state: present
+
+# Copy database dump file to remote host and restore it to database 'my_db'
+- name: Copy database dump file
+ copy:
+ src: dump.sql.bz2
+ dest: /tmp
+
+- name: Restore database
+ mysql_db:
+ name: my_db
+ state: import
+ target: /tmp/dump.sql.bz2
+
+- name: Restore database ignoring errors
+ mysql_db:
+ name: my_db
+ state: import
+ target: /tmp/dump.sql.bz2
+ force: yes
+
+- name: Dump multiple databases
+ mysql_db:
+ state: dump
+ name: db_1,db_2
+ target: /tmp/dump.sql
+
+- name: Dump multiple databases
+ mysql_db:
+ state: dump
+ name:
+ - db_1
+ - db_2
+ target: /tmp/dump.sql
+
+- name: Dump all databases to hostname.sql
+ mysql_db:
+ state: dump
+ name: all
+ target: /tmp/dump.sql
+
+- name: Dump all databases to hostname.sql including master data
+ mysql_db:
+ state: dump
+ name: all
+ target: /tmp/dump.sql
+ master_data: 1
+
+# Import of sql script with encoding option
+- name: >
+ Import dump.sql with specific latin1 encoding,
+ similar to mysql -u <username> --default-character-set=latin1 -p <password> < dump.sql
+ mysql_db:
+ state: import
+ name: all
+ encoding: latin1
+ target: /tmp/dump.sql
+
+# Dump of database with encoding option
+- name: >
+ Dump of database with specific latin1 encoding,
+ similar to mysqldump -u <username> --default-character-set=latin1 -p <password> <database>
+ mysql_db:
+ state: dump
+ name: db_1
+ encoding: latin1
+ target: /tmp/dump.sql
+
+- name: Delete database with name 'bobdata'
+ mysql_db:
+ name: bobdata
+ state: absent
+
+- name: Make sure there is neither a database with name 'foo', nor one with name 'bar'
+ mysql_db:
+ name:
+ - foo
+ - bar
+ state: absent
+
+# Dump database with argument not directly supported by this module
+# using dump_extra_args parameter
+- name: Dump databases without including triggers
+ mysql_db:
+ state: dump
+ name: foo
+ target: /tmp/dump.sql
+ dump_extra_args: --skip-triggers
+'''
+
+RETURN = r'''
+db:
+ description: Database names in string format delimited by white space.
+ returned: always
+ type: str
+ sample: "foo bar"
+db_list:
+ description: List of database names.
+ returned: always
+ type: list
+ sample: ["foo", "bar"]
+ version_added: '2.9'
+executed_commands:
+ description: List of commands the module attempted to run.
+ returned: if executed
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '2.10'
+'''
+
+import os
+import subprocess
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import mysql_quote_identifier
+from ansible.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+# ===========================================
+# MySQL module specific support methods.
+#
+
+
+def db_exists(cursor, db):
+ res = 0
+ for each_db in db:
+ res += cursor.execute("SHOW DATABASES LIKE %s", (each_db.replace("_", r"\_"),))
+ return res == len(db)
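+
+# Note: "_" is a single-character wildcard in SQL LIKE patterns, which is why
+# db_exists() escapes it above so that database names match literally.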
+
+
+def db_delete(cursor, db):
+ if not db:
+ return False
+ for each_db in db:
+ query = "DROP DATABASE %s" % mysql_quote_identifier(each_db, 'database')
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def db_dump(module, host, user, password, db_name, target, all_databases, port,
+ config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None,
+ single_transaction=None, quick=None, ignore_tables=None, hex_blob=None,
+ encoding=None, force=False, master_data=0, skip_lock_tables=False, dump_extra_args=None):
+ cmd = module.get_bin_path('mysqldump', True)
+ # If defined, mysqldump demands --defaults-extra-file be the first option
+ if config_file:
+ cmd += " --defaults-extra-file=%s" % shlex_quote(config_file)
+ if user is not None:
+ cmd += " --user=%s" % shlex_quote(user)
+ if password is not None:
+ cmd += " --password=%s" % shlex_quote(password)
+ if ssl_cert is not None:
+ cmd += " --ssl-cert=%s" % shlex_quote(ssl_cert)
+ if ssl_key is not None:
+ cmd += " --ssl-key=%s" % shlex_quote(ssl_key)
+ if ssl_ca is not None:
+ cmd += " --ssl-ca=%s" % shlex_quote(ssl_ca)
+ if force:
+ cmd += " --force"
+ if socket is not None:
+ cmd += " --socket=%s" % shlex_quote(socket)
+ else:
+ cmd += " --host=%s --port=%i" % (shlex_quote(host), port)
+
+ if all_databases:
+ cmd += " --all-databases"
+ elif len(db_name) > 1:
+ cmd += " --databases {0}".format(' '.join(db_name))
+ else:
+ cmd += " %s" % shlex_quote(' '.join(db_name))
+
+ if skip_lock_tables:
+ cmd += " --skip-lock-tables"
+ if (encoding is not None) and (encoding != ""):
+ cmd += " --default-character-set=%s" % shlex_quote(encoding)
+ if single_transaction:
+ cmd += " --single-transaction=true"
+ if quick:
+ cmd += " --quick"
+ if ignore_tables:
+ for an_ignored_table in ignore_tables:
+ cmd += " --ignore-table={0}".format(an_ignored_table)
+ if hex_blob:
+ cmd += " --hex-blob"
+ if master_data:
+ cmd += " --master-data=%s" % master_data
+ if dump_extra_args is not None:
+ cmd += " " + dump_extra_args
+
+ path = None
+ if os.path.splitext(target)[-1] == '.gz':
+ path = module.get_bin_path('gzip', True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ path = module.get_bin_path('bzip2', True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ path = module.get_bin_path('xz', True)
+
+ if path:
+ cmd = '%s | %s > %s' % (cmd, path, shlex_quote(target))
+ else:
+ cmd += " > %s" % shlex_quote(target)
+
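+ # Illustrative final command (assumed target /tmp/dump.sql.gz):
+ #   mysqldump ... foo | gzip > /tmp/dump.sql.gz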
+ executed_commands.append(cmd)
+ rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
+ return rc, stdout, stderr
+
+
+def db_import(module, host, user, password, db_name, target, all_databases, port, config_file,
+ socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, encoding=None, force=False):
+ if not os.path.exists(target):
+ return module.fail_json(msg="target %s does not exist on the host" % target)
+
+ cmd = [module.get_bin_path('mysql', True)]
+ # If given, --defaults-extra-file must be the first option, or mysql errors out
+ if config_file:
+ cmd.append("--defaults-extra-file=%s" % shlex_quote(config_file))
+ if user:
+ cmd.append("--user=%s" % shlex_quote(user))
+ if password:
+ cmd.append("--password=%s" % shlex_quote(password))
+ if ssl_cert is not None:
+ cmd.append("--ssl-cert=%s" % shlex_quote(ssl_cert))
+ if ssl_key is not None:
+ cmd.append("--ssl-key=%s" % shlex_quote(ssl_key))
+ if ssl_ca is not None:
+ cmd.append("--ssl-ca=%s" % shlex_quote(ssl_ca))
+ if force:
+ cmd.append("-f")
+ if socket is not None:
+ cmd.append("--socket=%s" % shlex_quote(socket))
+ else:
+ cmd.append("--host=%s" % shlex_quote(host))
+ cmd.append("--port=%i" % port)
+ if (encoding is not None) and (encoding != ""):
+ cmd.append("--default-character-set=%s" % shlex_quote(encoding))
+ if not all_databases:
+ cmd.append("--one-database")
+ cmd.append(shlex_quote(''.join(db_name)))
+
+ comp_prog_path = None
+ if os.path.splitext(target)[-1] == '.gz':
+ comp_prog_path = module.get_bin_path('gzip', required=True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzip2', required=True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xz', required=True)
+ if comp_prog_path:
+ # The line below is recorded for returned data only:
+ executed_commands.append('%s -dc %s | %s' % (comp_prog_path, target, ' '.join(cmd)))
+ p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout2, stderr2) = p2.communicate()
+ p1.stdout.close()
+ p1.wait()
+ if p1.returncode != 0:
+ stderr1 = p1.stderr.read()
+ return p1.returncode, '', stderr1
+ else:
+ return p2.returncode, stdout2, stderr2
+ else:
+ cmd = ' '.join(cmd)
+ cmd += " < %s" % shlex_quote(target)
+ executed_commands.append(cmd)
+ rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
+ return rc, stdout, stderr
+
+
+def db_create(cursor, db, encoding, collation):
+ if not db:
+ return False
+ query_params = dict(enc=encoding, collate=collation)
+ res = 0
+ for each_db in db:
+ query = ['CREATE DATABASE %s' % mysql_quote_identifier(each_db, 'database')]
+ if encoding:
+ query.append("CHARACTER SET %(enc)s")
+ if collation:
+ query.append("COLLATE %(collate)s")
+ query = ' '.join(query)
+ res += cursor.execute(query, query_params)
+ try:
+ executed_commands.append(cursor.mogrify(query, query_params))
+ except AttributeError:
+ executed_commands.append(cursor._executed)
+ except Exception:
+ executed_commands.append(query)
+ return res > 0
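+
+# Illustrative generated statement (assumed values; the driver substitutes the
+# quoted encoding and collation parameters):
+#   CREATE DATABASE `foo` CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'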
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(type='str'),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=3306),
+ login_unix_socket=dict(type='str'),
+ name=dict(type='list', required=True, aliases=['db']),
+ encoding=dict(type='str', default=''),
+ collation=dict(type='str', default=''),
+ target=dict(type='path'),
+ state=dict(type='str', default='present', choices=['absent', 'dump', 'import', 'present']),
+ client_cert=dict(type='path', aliases=['ssl_cert']),
+ client_key=dict(type='path', aliases=['ssl_key']),
+ ca_cert=dict(type='path', aliases=['ssl_ca']),
+ connect_timeout=dict(type='int', default=30),
+ config_file=dict(type='path', default='~/.my.cnf'),
+ single_transaction=dict(type='bool', default=False),
+ quick=dict(type='bool', default=True),
+ ignore_tables=dict(type='list', default=[]),
+ hex_blob=dict(default=False, type='bool'),
+ force=dict(type='bool', default=False),
+ master_data=dict(type='int', default=0, choices=[0, 1, 2]),
+ skip_lock_tables=dict(type='bool', default=False),
+ dump_extra_args=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ if mysql_driver is None:
+ module.fail_json(msg=mysql_driver_fail_msg)
+
+ db = module.params["name"]
+ if not db:
+ module.exit_json(changed=False, db=db, db_list=[])
+ db = [each_db.strip() for each_db in db]
+
+ encoding = module.params["encoding"]
+ collation = module.params["collation"]
+ state = module.params["state"]
+ target = module.params["target"]
+ socket = module.params["login_unix_socket"]
+ login_port = module.params["login_port"]
+ if login_port < 0 or login_port > 65535:
+ module.fail_json(msg="login_port must be a valid unix port number (0-65535)")
+ ssl_cert = module.params["client_cert"]
+ ssl_key = module.params["client_key"]
+ ssl_ca = module.params["ca_cert"]
+ connect_timeout = module.params['connect_timeout']
+ config_file = module.params['config_file']
+ login_password = module.params["login_password"]
+ login_user = module.params["login_user"]
+ login_host = module.params["login_host"]
+ ignore_tables = module.params["ignore_tables"]
+ for a_table in ignore_tables:
+ if a_table == "":
+ module.fail_json(msg="Name of ignored table cannot be empty")
+ single_transaction = module.params["single_transaction"]
+ quick = module.params["quick"]
+ hex_blob = module.params["hex_blob"]
+ force = module.params["force"]
+ master_data = module.params["master_data"]
+ skip_lock_tables = module.params["skip_lock_tables"]
+ dump_extra_args = module.params["dump_extra_args"]
+
+ if len(db) > 1 and state == 'import':
+ module.fail_json(msg="Multiple databases are not supported with state=import")
+ db_name = ' '.join(db)
+
+ all_databases = False
+ if state in ['dump', 'import']:
+ if target is None:
+ module.fail_json(msg="with state=%s target is required" % state)
+ if db == ['all']:
+ all_databases = True
+ else:
+ if db == ['all']:
+ module.fail_json(msg="name is not allowed to equal 'all' unless state equals import, or dump.")
+ try:
+ cursor, db_conn = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca,
+ connect_timeout=connect_timeout)
+ except Exception as e:
+ if os.path.exists(config_file):
+ module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+ "Exception message: %s" % (config_file, to_native(e)))
+ else:
+ module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, to_native(e)))
+
+ changed = False
+ if not os.path.exists(config_file):
+ config_file = None
+
+ existence_list = []
+ non_existence_list = []
+
+ if not all_databases:
+ for each_database in db:
+ if db_exists(cursor, [each_database]):
+ existence_list.append(each_database)
+ else:
+ non_existence_list.append(each_database)
+
+ if state == "absent":
+ if module.check_mode:
+ module.exit_json(changed=bool(existence_list), db=db_name, db_list=db)
+ try:
+ changed = db_delete(cursor, existence_list)
+ except Exception as e:
+ module.fail_json(msg="error deleting database: %s" % to_native(e))
+ module.exit_json(changed=changed, db=db_name, db_list=db, executed_commands=executed_commands)
+ elif state == "present":
+ if module.check_mode:
+ module.exit_json(changed=bool(non_existence_list), db=db_name, db_list=db)
+ changed = False
+ if non_existence_list:
+ try:
+ changed = db_create(cursor, non_existence_list, encoding, collation)
+ except Exception as e:
+ module.fail_json(msg="error creating database: %s" % to_native(e),
+ exception=traceback.format_exc())
+ module.exit_json(changed=changed, db=db_name, db_list=db, executed_commands=executed_commands)
+ elif state == "dump":
+ if non_existence_list and not all_databases:
+ module.fail_json(msg="Cannot dump database(s) %r - not found" % (', '.join(non_existence_list)))
+ if module.check_mode:
+ module.exit_json(changed=True, db=db_name, db_list=db)
+ rc, stdout, stderr = db_dump(module, login_host, login_user,
+ login_password, db, target, all_databases,
+ login_port, config_file, socket, ssl_cert, ssl_key,
+ ssl_ca, single_transaction, quick, ignore_tables,
+ hex_blob, encoding, force, master_data, skip_lock_tables,
+ dump_extra_args)
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ module.exit_json(changed=True, db=db_name, db_list=db, msg=stdout,
+ executed_commands=executed_commands)
+ elif state == "import":
+ if module.check_mode:
+ module.exit_json(changed=True, db=db_name, db_list=db)
+ if non_existence_list and not all_databases:
+ try:
+ db_create(cursor, non_existence_list, encoding, collation)
+ except Exception as e:
+ module.fail_json(msg="error creating database: %s" % to_native(e),
+ exception=traceback.format_exc())
+ rc, stdout, stderr = db_import(module, login_host, login_user,
+ login_password, db, target,
+ all_databases,
+ login_port, config_file,
+ socket, ssl_cert, ssl_key, ssl_ca, encoding, force)
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ module.exit_json(changed=True, db=db_name, db_list=db, msg=stdout,
+ executed_commands=executed_commands)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/mysql_user.py b/test/support/integration/plugins/modules/mysql_user.py
new file mode 100644
index 0000000000..f39982ed41
--- /dev/null
+++ b/test/support/integration/plugins/modules/mysql_user.py
@@ -0,0 +1,815 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
+# Sponsored by Four Kitchens http://fourkitchens.com.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: mysql_user
+short_description: Adds or removes a user from a MySQL database
+description:
+ - Adds or removes a user from a MySQL database.
+version_added: "0.6"
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ password:
+ description:
+ - Set the user's password.
+ type: str
+ encrypted:
+ description:
+ - Indicate that the 'password' field is a C(mysql_native_password) hash.
+ type: bool
+ default: no
+ version_added: "2.0"
+ host:
+ description:
+ - The 'host' part of the MySQL username.
+ type: str
+ default: localhost
+ host_all:
+ description:
+ - Override the host option, making Ansible apply changes to all hostnames for a given user.
+ - This option cannot be used when creating users.
+ type: bool
+ default: no
+ version_added: "2.1"
+ priv:
+ description:
+ - "MySQL privileges string in the format: C(db.table:priv1,priv2)."
+ - "Multiple privileges can be specified by separating each one using
+ a forward slash: C(db.table:priv/db.table:priv)."
+ - The format is based on MySQL C(GRANT) statement.
+ - Database and table names can be quoted, MySQL-style.
+ - If column privileges are used, the C(priv1,priv2) part must be
+ exactly as returned by a C(SHOW GRANTS) statement. If not followed,
+ the module will always report changes. This includes grouping columns
+ by permission (C(SELECT(col1,col2)) instead of C(SELECT(col1),SELECT(col2))).
+ - Can be passed as a dictionary (see the examples).
+ type: raw
+ append_privs:
+ description:
+ - Append the privileges defined by priv to the existing ones for this
+ user instead of overwriting existing ones.
+ type: bool
+ default: no
+ version_added: "1.4"
+ sql_log_bin:
+ description:
+ - Whether binary logging should be enabled or disabled for the connection.
+ type: bool
+ default: yes
+ version_added: "2.1"
+ state:
+ description:
+ - Whether the user should exist.
+ - When C(absent), removes the user.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ check_implicit_admin:
+ description:
+ - Check if mysql allows login as root/nopassword before trying supplied credentials.
+ type: bool
+ default: no
+ version_added: "1.3"
+ update_password:
+ description:
+ - C(always) will update passwords if they differ.
+ - C(on_create) will only set the password for newly created users.
+ type: str
+ choices: [ always, on_create ]
+ default: always
+ version_added: "2.0"
+ plugin:
+ description:
+ - User's plugin to authenticate (``CREATE USER user IDENTIFIED WITH plugin``).
+ type: str
+ version_added: '2.10'
+ plugin_hash_string:
+ description:
+ - User's plugin hash string (``CREATE USER user IDENTIFIED WITH plugin AS plugin_hash_string``).
+ type: str
+ version_added: '2.10'
+ plugin_auth_string:
+ description:
+ - User's plugin auth_string (``CREATE USER user IDENTIFIED WITH plugin BY plugin_auth_string``).
+ type: str
+ version_added: '2.10'
+
+notes:
+ - "MySQL server installs with default login_user of 'root' and no password. To secure this user
+ as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password,
+ without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
+ the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
+ the file."
+ - Currently, only the `mysql_native_password` encrypted password hash format is supported.
+
+seealso:
+- module: mysql_info
+- name: MySQL access control and account management reference
+ description: Complete reference of the MySQL access control and account management documentation.
+ link: https://dev.mysql.com/doc/refman/8.0/en/access-control.html
+- name: MySQL provided privileges reference
+ description: Complete reference of the MySQL provided privileges documentation.
+ link: https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html
+
+author:
+- Jonathan Mainguy (@Jmainguy)
+- Benjamin Malynovytch (@bmalynovytch)
+- Lukasz Tomaszkiewicz (@tomaszkiewicz)
+extends_documentation_fragment: mysql
+'''
+
+EXAMPLES = r'''
+- name: Removes anonymous user account for localhost
+ mysql_user:
+ name: ''
+ host: localhost
+ state: absent
+
+- name: Removes all anonymous user accounts
+ mysql_user:
+ name: ''
+ host_all: yes
+ state: absent
+
+- name: Create database user with name 'bob' and password '12345' with all database privileges
+ mysql_user:
+ name: bob
+ password: 12345
+ priv: '*.*:ALL'
+ state: present
+
+- name: Create database user using hashed password with all database privileges
+ mysql_user:
+ name: bob
+ password: '*EE0D72C1085C46C5278932678FBE2C6A782821B4'
+ encrypted: yes
+ priv: '*.*:ALL'
+ state: present
+
+- name: Create database user with password and all database privileges and 'WITH GRANT OPTION'
+ mysql_user:
+ name: bob
+ password: 12345
+ priv: '*.*:ALL,GRANT'
+ state: present
+
+- name: Create user with password, all database privileges and 'WITH GRANT OPTION' in db1 and db2
+ mysql_user:
+ state: present
+ name: bob
+ password: 12345dd
+ priv:
+ 'db1.*': 'ALL,GRANT'
+ 'db2.*': 'ALL,GRANT'
+
+# Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
+- name: Modify user to require SSL connections.
+ mysql_user:
+ name: bob
+ append_privs: yes
+ priv: '*.*:REQUIRESSL'
+ state: present
+
+- name: Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials.
+ mysql_user:
+ login_user: root
+ login_password: 123456
+ name: sally
+ state: absent
+
+- name: Ensure no user named 'sally' exists at all
+ mysql_user:
+ name: sally
+ host_all: yes
+ state: absent
+
+- name: Specify grants composed of more than one word
+ mysql_user:
+ name: replication
+ password: 12345
+ priv: "*.*:REPLICATION CLIENT"
+ state: present
+
+- name: Revoke all privileges for user 'bob' and password '12345'
+ mysql_user:
+ name: bob
+ password: 12345
+ priv: "*.*:USAGE"
+ state: present
+
+# Example privileges string format
+# mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
+
+- name: Example using login_unix_socket to connect to server
+ mysql_user:
+ name: root
+ password: abc123
+ login_unix_socket: /var/run/mysqld/mysqld.sock
+
+- name: Example of skipping binary logging while adding user 'bob'
+ mysql_user:
+ name: bob
+ password: 12345
+ priv: "*.*:USAGE"
+ state: present
+ sql_log_bin: no
+
+- name: Create user 'bob' authenticated with plugin 'AWSAuthenticationPlugin'
+ mysql_user:
+ name: bob
+ plugin: AWSAuthenticationPlugin
+ plugin_hash_string: RDS
+ priv: '*.*:ALL'
+ state: present
+
+# Example .my.cnf file for setting the root password
+# [client]
+# user=root
+# password=n<_665{vS43y
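+
+# A minimal sketch of the two-task root-securing workflow described in the
+# notes above; the variable name 'mysql_root_password' is illustrative.
+- name: Update root password, using the passwordless root login of a fresh install
+ mysql_user:
+ name: root
+ password: "{{ mysql_root_password }}"
+
+- name: Write ~/.my.cnf so subsequent runs read the new credentials
+ copy:
+ dest: ~/.my.cnf
+ mode: '0600'
+ content: |
+ [client]
+ user=root
+ password={{ mysql_root_password }}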
+'''
+
+import re
+import string
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import SQLParseError
+from ansible.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
+ 'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
+ 'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
+ 'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
+ 'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
+ 'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
+ 'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
+ 'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
+ 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL',
+ 'CREATE ROLE', 'DROP ROLE', 'APPLICATION_PASSWORD_ADMIN',
+ 'AUDIT_ADMIN', 'BACKUP_ADMIN', 'BINLOG_ADMIN',
+ 'BINLOG_ENCRYPTION_ADMIN', 'CLONE_ADMIN', 'CONNECTION_ADMIN',
+ 'ENCRYPTION_KEY_ADMIN', 'FIREWALL_ADMIN', 'FIREWALL_USER',
+ 'GROUP_REPLICATION_ADMIN', 'INNODB_REDO_LOG_ARCHIVE',
+ 'NDB_STORED_USER', 'PERSIST_RO_VARIABLES_ADMIN',
+ 'REPLICATION_APPLIER', 'REPLICATION_SLAVE_ADMIN',
+ 'RESOURCE_GROUP_ADMIN', 'RESOURCE_GROUP_USER',
+ 'ROLE_ADMIN', 'SESSION_VARIABLES_ADMIN', 'SET_USER_ID',
+ 'SYSTEM_USER', 'SYSTEM_VARIABLES_ADMIN',
+ 'TABLE_ENCRYPTION_ADMIN', 'VERSION_TOKEN_ADMIN',
+ 'XA_RECOVER_ADMIN', 'LOAD FROM S3', 'SELECT INTO S3'))
+
+
+class InvalidPrivsError(Exception):
+ pass
+
+# ===========================================
+# MySQL module specific support methods.
+#
+
+
+# User Authentication Management changed in MySQL 5.7 and MariaDB 10.2.0
+def use_old_user_mgmt(cursor):
+ cursor.execute("SELECT VERSION()")
+ result = cursor.fetchone()
+ version_str = result[0]
+ version = version_str.split('.')
+
+ if 'mariadb' in version_str.lower():
+ # Prior to MariaDB 10.2
+ return int(version[0]) * 1000 + int(version[1]) < 10002
+ else:
+ # Prior to MySQL 5.7
+ return int(version[0]) * 1000 + int(version[1]) < 5007
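+# For example, MySQL '5.6.47' gives 5 * 1000 + 6 = 5006, which is < 5007, so
+# the old user management style is used; MariaDB '10.2.31' gives
+# 10 * 1000 + 2 = 10002, so the new style is used.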
+
+
+def get_mode(cursor):
+ cursor.execute('SELECT @@GLOBAL.sql_mode')
+ result = cursor.fetchone()
+ mode_str = result[0]
+ if 'ANSI' in mode_str:
+ mode = 'ANSI'
+ else:
+ mode = 'NOTANSI'
+ return mode
+
+
+def user_exists(cursor, user, host, host_all):
+ if host_all:
+ cursor.execute("SELECT count(*) FROM mysql.user WHERE user = %s", ([user]))
+ else:
+ cursor.execute("SELECT count(*) FROM mysql.user WHERE user = %s AND host = %s", (user, host))
+
+ count = cursor.fetchone()
+ return count[0] > 0
+
+
+def user_add(cursor, user, host, host_all, password, encrypted,
+ plugin, plugin_hash_string, plugin_auth_string, new_priv, check_mode):
+ # we cannot create users without a proper hostname
+ if host_all:
+ return False
+
+ if check_mode:
+ return True
+
+ if password and encrypted:
+ cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user, host, password))
+ elif password and not encrypted:
+ cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user, host, password))
+ elif plugin and plugin_hash_string:
+ cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s AS %s", (user, host, plugin, plugin_hash_string))
+ elif plugin and plugin_auth_string:
+ cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s BY %s", (user, host, plugin, plugin_auth_string))
+ elif plugin:
+ cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s", (user, host, plugin))
+ else:
+ cursor.execute("CREATE USER %s@%s", (user, host))
+ if new_priv is not None:
+ for db_table, priv in iteritems(new_priv):
+ privileges_grant(cursor, user, host, db_table, priv)
+ return True
+
+
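+# A mysql_native_password hash is 41 characters long: a leading '*' followed
+# by 40 hexadecimal digits, e.g. '*EE0D72C1085C46C5278932678FBE2C6A782821B4'.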
+def is_hash(password):
+ ishash = False
+ if len(password) == 41 and password[0] == '*':
+ if frozenset(password[1:]).issubset(string.hexdigits):
+ ishash = True
+ return ishash
+
+
+def user_mod(cursor, user, host, host_all, password, encrypted,
+ plugin, plugin_hash_string, plugin_auth_string, new_priv, append_privs, module):
+ changed = False
+ msg = "User unchanged"
+ grant_option = False
+
+ if host_all:
+ hostnames = user_get_hostnames(cursor, [user])
+ else:
+ hostnames = [host]
+
+ for host in hostnames:
+ # Handle clear text and hashed passwords.
+ if bool(password):
+ # Determine which user management method the server uses
+ old_user_mgmt = use_old_user_mgmt(cursor)
+
+ # Get a list of valid columns in mysql.user table to check if Password and/or authentication_string exist
+ cursor.execute("""
+ SELECT COLUMN_NAME FROM information_schema.COLUMNS
+ WHERE TABLE_SCHEMA = 'mysql' AND TABLE_NAME = 'user' AND COLUMN_NAME IN ('Password', 'authentication_string')
+ ORDER BY COLUMN_NAME DESC LIMIT 1
+ """)
+ colA = cursor.fetchone()
+
+ cursor.execute("""
+ SELECT COLUMN_NAME FROM information_schema.COLUMNS
+ WHERE TABLE_SCHEMA = 'mysql' AND TABLE_NAME = 'user' AND COLUMN_NAME IN ('Password', 'authentication_string')
+ ORDER BY COLUMN_NAME ASC LIMIT 1
+ """)
+ colB = cursor.fetchone()
+
+ # Select hash from either Password or authentication_string, depending on which one exists and/or is filled
+ cursor.execute("""
+ SELECT COALESCE(
+ CASE WHEN %s = '' THEN NULL ELSE %s END,
+ CASE WHEN %s = '' THEN NULL ELSE %s END
+ )
+ FROM mysql.user WHERE user = %%s AND host = %%s
+ """ % (colA[0], colA[0], colB[0], colB[0]), (user, host))
+ current_pass_hash = cursor.fetchone()[0]
+ if isinstance(current_pass_hash, bytes):
+ current_pass_hash = current_pass_hash.decode('ascii')
+
+ if encrypted:
+ encrypted_password = password
+ if not is_hash(encrypted_password):
+ module.fail_json(msg="encrypted was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))")
+ else:
+ if old_user_mgmt:
+ cursor.execute("SELECT PASSWORD(%s)", (password,))
+ else:
+ cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,))
+ encrypted_password = cursor.fetchone()[0]
+
+ if current_pass_hash != encrypted_password:
+ msg = "Password updated"
+ if module.check_mode:
+ return (True, msg)
+ if old_user_mgmt:
+ cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, encrypted_password))
+ msg = "Password updated (old style)"
+ else:
+ try:
+ cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password AS %s", (user, host, encrypted_password))
+ msg = "Password updated (new style)"
+ except (mysql_driver.Error) as e:
+ # https://stackoverflow.com/questions/51600000/authentication-string-of-root-user-on-mysql
+ # Replacing empty root password with new authentication mechanisms fails with error 1396
+ if e.args[0] == 1396:
+ cursor.execute(
+ "UPDATE user SET plugin = %s, authentication_string = %s, Password = '' WHERE User = %s AND Host = %s",
+ ('mysql_native_password', encrypted_password, user, host)
+ )
+ cursor.execute("FLUSH PRIVILEGES")
+ msg = "Password forced update"
+ else:
+ raise
+ changed = True
+
+ # Handle plugin authentication
+ if plugin:
+ cursor.execute("SELECT plugin, authentication_string FROM mysql.user "
+ "WHERE user = %s AND host = %s", (user, host))
+ current_plugin = cursor.fetchone()
+
+ update = False
+
+ if current_plugin[0] != plugin:
+ update = True
+
+ if plugin_hash_string and current_plugin[1] != plugin_hash_string:
+ update = True
+
+ if plugin_auth_string and current_plugin[1] != plugin_auth_string:
+ # this case can cause more updates than expected, because the
+ # plugin may hash auth_string in any way it wants, and there is
+ # no way to compute the expected value for comparison, so we
+ # prefer to update too often rather than never
+ update = True
+
+ if update:
+ if plugin_hash_string:
+ cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s AS %s", (user, host, plugin, plugin_hash_string))
+ elif plugin_auth_string:
+ cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s BY %s", (user, host, plugin, plugin_auth_string))
+ else:
+ cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s", (user, host, plugin))
+ changed = True
+
+ # Handle privileges
+ if new_priv is not None:
+ curr_priv = privileges_get(cursor, user, host)
+
+ # If the user has privileges on a db.table that doesn't appear at all in
+ # the new specification, then revoke all privileges on it.
+ for db_table, priv in iteritems(curr_priv):
+ # If the user has the GRANT OPTION on a db.table, revoke it first.
+ if "GRANT" in priv:
+ grant_option = True
+ if db_table not in new_priv:
+ if user != "root" and "PROXY" not in priv and not append_privs:
+ msg = "Privileges updated"
+ if module.check_mode:
+ return (True, msg)
+ privileges_revoke(cursor, user, host, db_table, priv, grant_option)
+ changed = True
+
+ # If the user doesn't currently have any privileges on a db.table, then
+ # we can perform a straight grant operation.
+ for db_table, priv in iteritems(new_priv):
+ if db_table not in curr_priv:
+ msg = "New privileges granted"
+ if module.check_mode:
+ return (True, msg)
+ privileges_grant(cursor, user, host, db_table, priv)
+ changed = True
+
+ # If the db.table specification exists in both the user's current privileges
+ # and in the new privileges, then we need to see if there's a difference.
+ db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
+ for db_table in db_table_intersect:
+ priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
+ if len(priv_diff) > 0:
+ msg = "Privileges updated"
+ if module.check_mode:
+ return (True, msg)
+ if not append_privs:
+ privileges_revoke(cursor, user, host, db_table, curr_priv[db_table], grant_option)
+ privileges_grant(cursor, user, host, db_table, new_priv[db_table])
+ changed = True
+
+ return (changed, msg)
+
+
+def user_delete(cursor, user, host, host_all, check_mode):
+ if check_mode:
+ return True
+
+ if host_all:
+ hostnames = user_get_hostnames(cursor, [user])
+
+ for hostname in hostnames:
+ cursor.execute("DROP USER %s@%s", (user, hostname))
+ else:
+ cursor.execute("DROP USER %s@%s", (user, host))
+
+ return True
+
+
+def user_get_hostnames(cursor, user):
+ cursor.execute("SELECT Host FROM mysql.user WHERE user = %s", user)
+ hostnames_raw = cursor.fetchall()
+ hostnames = []
+
+ for hostname_raw in hostnames_raw:
+ hostnames.append(hostname_raw[0])
+
+ return hostnames
+
+
+def privileges_get(cursor, user, host):
+ """ MySQL doesn't have a better method of getting privileges aside from the
+ SHOW GRANTS query syntax, which requires us to then parse the returned string.
+ Here's an example of the string that is returned from MySQL:
+
+ GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
+
+ This function makes the query and returns a dictionary containing the results.
+ The dictionary format is the same as that returned by privileges_unpack() below.
+ """
+ output = {}
+ cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
+ grants = cursor.fetchall()
+
+ def pick(x):
+ if x == 'ALL PRIVILEGES':
+ return 'ALL'
+ else:
+ return x
+
+ for grant in grants:
+ res = re.match("""GRANT (.+) ON (.+) TO (['`"]).*\\3@(['`"]).*\\4( IDENTIFIED BY PASSWORD (['`"]).+\\6)? ?(.*)""", grant[0])
+ if res is None:
+ raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
+ privileges = res.group(1).split(", ")
+ privileges = [pick(x) for x in privileges]
+ if "WITH GRANT OPTION" in res.group(7):
+ privileges.append('GRANT')
+ if "REQUIRE SSL" in res.group(7):
+ privileges.append('REQUIRESSL')
+ db = res.group(2)
+ output[db] = privileges
+ return output
+
+
+def privileges_unpack(priv, mode):
+ """ Take a privileges string, typically passed as a parameter, and unserialize
+ it into a dictionary, the same format as privileges_get() above. We have this
+ custom format to avoid using YAML/JSON strings inside YAML playbooks. Example
+ of a privileges string:
+
+ mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL
+
+ The privilege USAGE stands for no privileges, so we add that in on *.* if it's
+ not specified in the string, as MySQL will always provide this by default.
+ """
+ if mode == 'ANSI':
+ quote = '"'
+ else:
+ quote = '`'
+ output = {}
+ privs = []
+ for item in priv.strip().split('/'):
+ pieces = item.strip().rsplit(':', 1)
+ dbpriv = pieces[0].rsplit(".", 1)
+
+ # Check for FUNCTION or PROCEDURE object types
+ parts = dbpriv[0].split(" ", 1)
+ object_type = ''
+ if len(parts) > 1 and (parts[0] == 'FUNCTION' or parts[0] == 'PROCEDURE'):
+ object_type = parts[0] + ' '
+ dbpriv[0] = parts[1]
+
+ # Do not quote the database or table part when it is the
+ # wildcard '*', i.e. leave *. and .* unquoted
+ for i, side in enumerate(dbpriv):
+ if side.strip('`') != '*':
+ dbpriv[i] = '%s%s%s' % (quote, side.strip('`'), quote)
+ pieces[0] = object_type + '.'.join(dbpriv)
+
+ if '(' in pieces[1]:
+ output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
+ for i in output[pieces[0]]:
+ privs.append(re.sub(r'\s*\(.*\)', '', i))
+ else:
+ output[pieces[0]] = pieces[1].upper().split(',')
+ privs = output[pieces[0]]
+ new_privs = frozenset(privs)
+ if not new_privs.issubset(VALID_PRIVS):
+ raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
+
+ if '*.*' not in output:
+ output['*.*'] = ['USAGE']
+
+ # if we are only specifying something like REQUIRESSL and/or GRANT (=WITH GRANT OPTION) in *.*
+ # we still need to add USAGE as a privilege to avoid syntax errors
+ if 'REQUIRESSL' in priv and not set(output['*.*']).difference(set(['GRANT', 'REQUIRESSL'])):
+ output['*.*'].append('USAGE')
+
+ return output
+
+
+def privileges_revoke(cursor, user, host, db_table, priv, grant_option):
+ # Escape '%' since mysql db.execute() uses a format string
+ db_table = db_table.replace('%', '%%')
+ if grant_option:
+ query = ["REVOKE GRANT OPTION ON %s" % db_table]
+ query.append("FROM %s@%s")
+ query = ' '.join(query)
+ cursor.execute(query, (user, host))
+ priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
+ query = ["REVOKE %s ON %s" % (priv_string, db_table)]
+ query.append("FROM %s@%s")
+ query = ' '.join(query)
+ cursor.execute(query, (user, host))
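+# For example, with grant_option set, revoking ['SELECT', 'GRANT'] on `mydb`.*
+# executes REVOKE GRANT OPTION ON `mydb`.* FROM %s@%s followed by
+# REVOKE SELECT ON `mydb`.* FROM %s@%s.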
+
+
+def privileges_grant(cursor, user, host, db_table, priv):
+ # Escape '%' since mysql db.execute uses a format string and the
+ # specification of db and table often uses a % (SQL wildcard)
+ db_table = db_table.replace('%', '%%')
+ priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
+ query = ["GRANT %s ON %s" % (priv_string, db_table)]
+ query.append("TO %s@%s")
+ if 'REQUIRESSL' in priv:
+ query.append("REQUIRE SSL")
+ if 'GRANT' in priv:
+ query.append("WITH GRANT OPTION")
+ query = ' '.join(query)
+ cursor.execute(query, (user, host))
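+# For example, granting ['SELECT', 'INSERT', 'GRANT'] on `mydb`.* executes
+# GRANT SELECT,INSERT ON `mydb`.* TO %s@%s WITH GRANT OPTION.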
+
+
+def convert_priv_dict_to_str(priv):
+ """Converts privs dictionary to string of certain format.
+
+ Args:
+ priv (dict): Dict of privileges that needs to be converted to string.
+
+ Returns:
+ priv (str): String representation of input argument.
+ """
+ priv_list = ['%s:%s' % (key, val) for key, val in iteritems(priv)]
+
+ return '/'.join(priv_list)
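+# For example, {'db1.*': 'ALL,GRANT', 'db2.*': 'SELECT'} is converted to
+# 'db1.*:ALL,GRANT/db2.*:SELECT'.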
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(type='str'),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=3306),
+ login_unix_socket=dict(type='str'),
+ user=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', no_log=True),
+ encrypted=dict(type='bool', default=False),
+ host=dict(type='str', default='localhost'),
+ host_all=dict(type="bool", default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priv=dict(type='raw'),
+ append_privs=dict(type='bool', default=False),
+ check_implicit_admin=dict(type='bool', default=False),
+ update_password=dict(type='str', default='always', choices=['always', 'on_create']),
+ connect_timeout=dict(type='int', default=30),
+ config_file=dict(type='path', default='~/.my.cnf'),
+ sql_log_bin=dict(type='bool', default=True),
+ client_cert=dict(type='path', aliases=['ssl_cert']),
+ client_key=dict(type='path', aliases=['ssl_key']),
+ ca_cert=dict(type='path', aliases=['ssl_ca']),
+ plugin=dict(default=None, type='str'),
+ plugin_hash_string=dict(default=None, type='str'),
+ plugin_auth_string=dict(default=None, type='str'),
+ ),
+ supports_check_mode=True,
+ )
+ login_user = module.params["login_user"]
+ login_password = module.params["login_password"]
+ user = module.params["user"]
+ password = module.params["password"]
+ encrypted = module.boolean(module.params["encrypted"])
+ host = module.params["host"].lower()
+ host_all = module.params["host_all"]
+ state = module.params["state"]
+ priv = module.params["priv"]
+ check_implicit_admin = module.params['check_implicit_admin']
+ connect_timeout = module.params['connect_timeout']
+ config_file = module.params['config_file']
+ append_privs = module.boolean(module.params["append_privs"])
+ update_password = module.params['update_password']
+ ssl_cert = module.params["client_cert"]
+ ssl_key = module.params["client_key"]
+ ssl_ca = module.params["ca_cert"]
+ db = ''
+ sql_log_bin = module.params["sql_log_bin"]
+ plugin = module.params["plugin"]
+ plugin_hash_string = module.params["plugin_hash_string"]
+ plugin_auth_string = module.params["plugin_auth_string"]
+ if priv and not (isinstance(priv, str) or isinstance(priv, dict)):
+ module.fail_json(msg="priv parameter must be str or dict but %s was passed" % type(priv))
+
+ if priv and isinstance(priv, dict):
+ priv = convert_priv_dict_to_str(priv)
+
+ if mysql_driver is None:
+ module.fail_json(msg=mysql_driver_fail_msg)
+
+ cursor = None
+ try:
+ if check_implicit_admin:
+ try:
+ cursor, db_conn = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db,
+ connect_timeout=connect_timeout)
+ except Exception:
+ pass
+
+ if not cursor:
+ cursor, db_conn = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db,
+ connect_timeout=connect_timeout)
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+ "Exception message: %s" % (config_file, to_native(e)))
+
+ if not sql_log_bin:
+ cursor.execute("SET SQL_LOG_BIN=0;")
+
+ if priv is not None:
+ try:
+ mode = get_mode(cursor)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+ try:
+ priv = privileges_unpack(priv, mode)
+ except Exception as e:
+ module.fail_json(msg="invalid privileges string: %s" % to_native(e))
+
+ if state == "present":
+ if user_exists(cursor, user, host, host_all):
+ try:
+ if update_password == 'always':
+ changed, msg = user_mod(cursor, user, host, host_all, password, encrypted,
+ plugin, plugin_hash_string, plugin_auth_string,
+ priv, append_privs, module)
+ else:
+ changed, msg = user_mod(cursor, user, host, host_all, None, encrypted,
+ plugin, plugin_hash_string, plugin_auth_string,
+ priv, append_privs, module)
+
+ except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
+ module.fail_json(msg=to_native(e))
+ else:
+ if host_all:
+ module.fail_json(msg="host_all parameter cannot be used when adding a user")
+ try:
+ changed = user_add(cursor, user, host, host_all, password, encrypted,
+ plugin, plugin_hash_string, plugin_auth_string,
+ priv, module.check_mode)
+ if changed:
+ msg = "User added"
+
+ except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
+ module.fail_json(msg=to_native(e))
+ elif state == "absent":
+ if user_exists(cursor, user, host, host_all):
+ changed = user_delete(cursor, user, host, host_all, module.check_mode)
+ msg = "User deleted"
+ else:
+ changed = False
+ msg = "User doesn't exist"
+ module.exit_json(changed=changed, user=user, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/openssl_certificate.py b/test/support/integration/plugins/modules/openssl_certificate.py
new file mode 100644
index 0000000000..4bd5e5c468
--- /dev/null
+++ b/test/support/integration/plugins/modules/openssl_certificate.py
@@ -0,0 +1,2756 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
+# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: openssl_certificate
+version_added: "2.4"
+short_description: Generate and/or check OpenSSL certificates
+description:
+ - This module allows one to (re)generate OpenSSL certificates.
+ - It implements a notion of provider (i.e. C(selfsigned), C(ownca), C(acme), C(assertonly), C(entrust))
+ for your certificate.
+ - The C(assertonly) provider is intended for use cases where one is only interested in
+ checking properties of a supplied certificate. Please note that this provider has been
+ deprecated in Ansible 2.9 and will be removed in Ansible 2.13. See the examples on how
+ to emulate C(assertonly) usage with M(openssl_certificate_info), M(openssl_csr_info),
+ M(openssl_privatekey_info) and M(assert). This also allows more flexible checks than
+ the ones offered by the C(assertonly) provider.
+ - The C(ownca) provider is intended for generating an OpenSSL certificate signed with your own
+ CA (Certificate Authority) certificate, which is itself a self-signed certificate.
+ - Many properties that can be specified in this module are for validation of an
+ existing or newly generated certificate. The proper place to specify them, if you
+ want to receive a certificate with these properties, is a CSR (Certificate Signing Request).
+ - "Please note that the module regenerates an existing certificate if it doesn't match the module's
+ options, or if it seems to be corrupt. If you are concerned that this could overwrite
+ your existing certificate, consider using the I(backup) option."
+ - It uses the pyOpenSSL or cryptography Python library to interact with OpenSSL.
+ - If both the cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements),
+ cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with C(select_crypto_backend)).
+ Please note that the PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13.
+requirements:
+ - PyOpenSSL >= 0.15 or cryptography >= 1.6 (if using C(selfsigned) or C(assertonly) provider)
+ - acme-tiny >= 4.0.0 (if using the C(acme) provider)
+author:
+ - Yanis Guenane (@Spredzy)
+ - Markus Teufelberger (@MarkusTeufelberger)
+options:
+ state:
+ description:
+ - Whether the certificate should exist or not, taking action if the state is different from what is stated.
+ type: str
+ default: present
+ choices: [ absent, present ]
+
+ path:
+ description:
+ - Remote absolute path where the generated certificate file should be created or is already located.
+ type: path
+ required: true
+
+ provider:
+ description:
+ - Name of the provider to use to generate/retrieve the OpenSSL certificate.
+ - The C(assertonly) provider will not generate files and will fail if the certificate file is missing.
+ - The C(assertonly) provider has been deprecated in Ansible 2.9 and will be removed in Ansible 2.13.
+ Please see the examples on how to emulate it with M(openssl_certificate_info), M(openssl_csr_info),
+ M(openssl_privatekey_info) and M(assert).
+ - "The C(entrust) provider was added for Ansible 2.9 and requires credentials for the
+ L(Entrust Certificate Services,https://www.entrustdatacard.com/products/categories/ssl-certificates) (ECS) API."
+ - Required if I(state) is C(present).
+ type: str
+ choices: [ acme, assertonly, entrust, ownca, selfsigned ]
+
+ force:
+ description:
+ - Generate the certificate, even if it already exists.
+ type: bool
+ default: no
+
+ csr_path:
+ description:
+ - Path to the Certificate Signing Request (CSR) used to generate this certificate.
+ - This is not required in C(assertonly) mode.
+ - This is mutually exclusive with I(csr_content).
+ type: path
+ csr_content:
+ description:
+ - Content of the Certificate Signing Request (CSR) used to generate this certificate.
+ - This is not required in C(assertonly) mode.
+ - This is mutually exclusive with I(csr_path).
+ type: str
+ version_added: "2.10"
+
+ privatekey_path:
+ description:
+ - Path to the private key to use when signing the certificate.
+ - This is mutually exclusive with I(privatekey_content).
+ type: path
+ privatekey_content:
+ description:
+ - Content of the private key to use when signing the certificate.
+ - This is mutually exclusive with I(privatekey_path).
+ type: str
+ version_added: "2.10"
+
+ privatekey_passphrase:
+ description:
+ - The passphrase for the private key given in I(privatekey_path) or I(privatekey_content).
+ - This is required if the private key is password protected.
+ type: str
+
+ selfsigned_version:
+ description:
+ - Version of the C(selfsigned) certificate.
+ - Nowadays it should almost always be C(3).
+ - This is only used by the C(selfsigned) provider.
+ type: int
+ default: 3
+ version_added: "2.5"
+
+ selfsigned_digest:
+ description:
+ - Digest algorithm to be used when self-signing the certificate.
+ - This is only used by the C(selfsigned) provider.
+ type: str
+ default: sha256
+
+ selfsigned_not_before:
+ description:
+ - The point in time the certificate is valid from.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent.
+ - If this value is not specified, the certificate will start being valid from now.
+ - This is only used by the C(selfsigned) provider.
+ type: str
+ default: +0s
+ aliases: [ selfsigned_notBefore ]
+
+ selfsigned_not_after:
+ description:
+ - The point in time at which the certificate stops being valid.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent.
+ - If this value is not specified, the certificate will stop being valid 10 years from now.
+ - This is only used by the C(selfsigned) provider.
+ type: str
+ default: +3650d
+ aliases: [ selfsigned_notAfter ]
+
+ selfsigned_create_subject_key_identifier:
+ description:
+ - Whether to create the Subject Key Identifier (SKI) from the public key.
+ - A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
+ provide one.
+ - A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
+ ignored.
+ - A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
+ - This is only used by the C(selfsigned) provider.
+ - Note that this is only supported if the C(cryptography) backend is used!
+ type: str
+ choices: [create_if_not_provided, always_create, never_create]
+ default: create_if_not_provided
+ version_added: "2.9"
+
+ ownca_path:
+ description:
+ - Remote absolute path of the CA (Certificate Authority) certificate.
+ - This is only used by the C(ownca) provider.
+ - This is mutually exclusive with I(ownca_content).
+ type: path
+ version_added: "2.7"
+ ownca_content:
+ description:
+ - Content of the CA (Certificate Authority) certificate.
+ - This is only used by the C(ownca) provider.
+ - This is mutually exclusive with I(ownca_path).
+ type: str
+ version_added: "2.10"
+
+ ownca_privatekey_path:
+ description:
+ - Path to the CA (Certificate Authority) private key to use when signing the certificate.
+ - This is only used by the C(ownca) provider.
+ - This is mutually exclusive with I(ownca_privatekey_content).
+ type: path
+ version_added: "2.7"
+ ownca_privatekey_content:
+ description:
+ - Content of the CA (Certificate Authority) private key to use when signing the certificate.
+ - This is only used by the C(ownca) provider.
+ - This is mutually exclusive with I(ownca_privatekey_path).
+ type: str
+ version_added: "2.10"
+
+ ownca_privatekey_passphrase:
+ description:
+ - The passphrase for the CA private key given in I(ownca_privatekey_path) or I(ownca_privatekey_content).
+ - This is only used by the C(ownca) provider.
+ type: str
+ version_added: "2.7"
+
+ ownca_digest:
+ description:
+ - The digest algorithm to be used for the C(ownca) certificate.
+ - This is only used by the C(ownca) provider.
+ type: str
+ default: sha256
+ version_added: "2.7"
+
+ ownca_version:
+ description:
+ - The version of the C(ownca) certificate.
+ - Nowadays it should almost always be C(3).
+ - This is only used by the C(ownca) provider.
+ type: int
+ default: 3
+ version_added: "2.7"
+
+ ownca_not_before:
+ description:
+ - The point in time the certificate is valid from.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent.
+ - If this value is not specified, the certificate will start being valid from now.
+ - This is only used by the C(ownca) provider.
+ type: str
+ default: +0s
+ version_added: "2.7"
+
+ ownca_not_after:
+ description:
+ - The point in time at which the certificate stops being valid.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent.
+ - If this value is not specified, the certificate will stop being valid 10 years from now.
+ - This is only used by the C(ownca) provider.
+ type: str
+ default: +3650d
+ version_added: "2.7"
+
+ ownca_create_subject_key_identifier:
+ description:
+ - Whether to create the Subject Key Identifier (SKI) from the public key.
+ - A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
+ provide one.
+ - A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
+ ignored.
+ - A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
+ - This is only used by the C(ownca) provider.
+ - Note that this is only supported if the C(cryptography) backend is used!
+ type: str
+ choices: [create_if_not_provided, always_create, never_create]
+ default: create_if_not_provided
+ version_added: "2.9"
+
+ ownca_create_authority_key_identifier:
+ description:
+ - Create an Authority Key Identifier from the CA's certificate. If the CSR provided
+ an authority key identifier, it is ignored.
+ - The Authority Key Identifier is generated from the CA certificate's Subject Key Identifier,
+ if available. If it is not available, the CA certificate's public key will be used.
+ - This is only used by the C(ownca) provider.
+ - Note that this is only supported if the C(cryptography) backend is used!
+ type: bool
+ default: yes
+ version_added: "2.9"
+
+ acme_accountkey_path:
+ description:
+ - The path to the accountkey for the C(acme) provider.
+ - This is only used by the C(acme) provider.
+ type: path
+
+ acme_challenge_path:
+ description:
+ - The path to the ACME challenge directory that is served on U(http://<HOST>:80/.well-known/acme-challenge/)
+ - This is only used by the C(acme) provider.
+ type: path
+
+ acme_chain:
+ description:
+ - Include the intermediate certificate in the generated certificate.
+ - This is only used by the C(acme) provider.
+ - Note that this is only available for older versions of C(acme-tiny).
+ New versions include the chain automatically, and setting I(acme_chain) to C(yes) results in an error.
+ type: bool
+ default: no
+ version_added: "2.5"
+
+ acme_directory:
+ description:
+ - "The ACME directory to use. You can use any directory that supports the ACME protocol, such as Buypass or Let's Encrypt."
+ - "Let's Encrypt recommends using their staging server while developing jobs. U(https://letsencrypt.org/docs/staging-environment/)."
+ type: str
+ default: https://acme-v02.api.letsencrypt.org/directory
+ version_added: "2.10"
+
+ signature_algorithms:
+ description:
+ - A list of algorithms that you would accept the certificate to be signed with
+ (e.g. ['sha256WithRSAEncryption', 'sha512WithRSAEncryption']).
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: list
+ elements: str
+
+ issuer:
+ description:
+ - The key/value pairs that must be present in the issuer name field of the certificate.
+ - If you need to specify more than one value with the same key, use a list as value.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: dict
+
+ issuer_strict:
+ description:
+ - If set to C(yes), the I(issuer) field must contain only these values.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: bool
+ default: no
+ version_added: "2.5"
+
+ subject:
+ description:
+ - The key/value pairs that must be present in the subject name field of the certificate.
+ - If you need to specify more than one value with the same key, use a list as value.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: dict
+
+ subject_strict:
+ description:
+ - If set to C(yes), the I(subject) field must contain only these values.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: bool
+ default: no
+ version_added: "2.5"
+
+ has_expired:
+ description:
+ - Checks if the certificate is expired/not expired at the time the module is executed.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: bool
+ default: no
+
+ version:
+ description:
+ - The version of the certificate.
+ - Nowadays it should almost always be 3.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: int
+
+ valid_at:
+ description:
+ - The certificate must be valid at this point in time.
+ - The timestamp is formatted as an ASN.1 TIME.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: str
+
+ invalid_at:
+ description:
+ - The certificate must be invalid at this point in time.
+ - The timestamp is formatted as an ASN.1 TIME.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: str
+
+ not_before:
+ description:
+ - The certificate must start to become valid at this point in time.
+ - The timestamp is formatted as an ASN.1 TIME.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: str
+ aliases: [ notBefore ]
+
+ not_after:
+ description:
+ - The certificate must expire at this point in time.
+ - The timestamp is formatted as an ASN.1 TIME.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: str
+ aliases: [ notAfter ]
+
+ valid_in:
+ description:
+ - The certificate must still be valid at this relative time offset from now.
+ - Valid format is C([+-]timespec | number_of_seconds) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using this parameter, this module is NOT idempotent.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: str
+
+ key_usage:
+ description:
+ - The I(key_usage) extension field must contain all these values.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: list
+ elements: str
+ aliases: [ keyUsage ]
+
+ key_usage_strict:
+ description:
+ - If set to C(yes), the I(key_usage) extension field must contain only these values.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: bool
+ default: no
+ aliases: [ keyUsage_strict ]
+
+ extended_key_usage:
+ description:
+ - The I(extended_key_usage) extension field must contain all these values.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: list
+ elements: str
+ aliases: [ extendedKeyUsage ]
+
+ extended_key_usage_strict:
+ description:
+ - If set to C(yes), the I(extended_key_usage) extension field must contain only these values.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: bool
+ default: no
+ aliases: [ extendedKeyUsage_strict ]
+
+ subject_alt_name:
+ description:
+ - The I(subject_alt_name) extension field must contain these values.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: list
+ elements: str
+ aliases: [ subjectAltName ]
+
+ subject_alt_name_strict:
+ description:
+ - If set to C(yes), the I(subject_alt_name) extension field must contain only these values.
+ - This is only used by the C(assertonly) provider.
+ - This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in Ansible 2.13.
+ For alternatives, see the example on replacing C(assertonly).
+ type: bool
+ default: no
+ aliases: [ subjectAltName_strict ]
+
+ select_crypto_backend:
+ description:
+ - Determines which crypto backend to use.
+ - The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
+ - If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
+ - If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
+ - Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in Ansible 2.13.
+ From that point on, only the C(cryptography) backend will be available.
+ type: str
+ default: auto
+ choices: [ auto, cryptography, pyopenssl ]
+ version_added: "2.8"
+
+ backup:
+ description:
+ - Create a backup file including a timestamp so you can get the original
+ certificate back if you overwrote it with a new one by accident.
+ - This is not used by the C(assertonly) provider.
+ type: bool
+ default: no
+ version_added: "2.8"
+
+ entrust_cert_type:
+ description:
+ - Specify the type of certificate requested.
+ - This is only used by the C(entrust) provider.
+ type: str
+ default: STANDARD_SSL
+ choices: [ 'STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL', 'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT' ]
+ version_added: "2.9"
+
+ entrust_requester_email:
+ description:
+ - The email of the requester of the certificate (for tracking purposes).
+ - This is only used by the C(entrust) provider.
+ - This is required if the provider is C(entrust).
+ type: str
+ version_added: "2.9"
+
+ entrust_requester_name:
+ description:
+ - The name of the requester of the certificate (for tracking purposes).
+ - This is only used by the C(entrust) provider.
+ - This is required if the provider is C(entrust).
+ type: str
+ version_added: "2.9"
+
+ entrust_requester_phone:
+ description:
+ - The phone number of the requester of the certificate (for tracking purposes).
+ - This is only used by the C(entrust) provider.
+ - This is required if the provider is C(entrust).
+ type: str
+ version_added: "2.9"
+
+ entrust_api_user:
+ description:
+ - The username for authentication to the Entrust Certificate Services (ECS) API.
+ - This is only used by the C(entrust) provider.
+ - This is required if the provider is C(entrust).
+ type: str
+ version_added: "2.9"
+
+ entrust_api_key:
+ description:
+ - The key (password) for authentication to the Entrust Certificate Services (ECS) API.
+ - This is only used by the C(entrust) provider.
+ - This is required if the provider is C(entrust).
+ type: str
+ version_added: "2.9"
+
+ entrust_api_client_cert_path:
+ description:
+ - The path to the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
+ - This is only used by the C(entrust) provider.
+ - This is required if the provider is C(entrust).
+ type: path
+ version_added: "2.9"
+
+ entrust_api_client_cert_key_path:
+ description:
+ - The path to the private key of the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
+ - This is only used by the C(entrust) provider.
+ - This is required if the provider is C(entrust).
+ type: path
+ version_added: "2.9"
+
+ entrust_not_after:
+ description:
+ - The point in time at which the certificate stops being valid.
+ - Time can be specified either as relative time or as an absolute timestamp.
+ - A valid absolute time format is C(ASN.1 TIME) such as C(2019-06-18).
+ - A valid relative time format is C([+-]timespec) where timespec can be an integer + C([w | d | h | m | s]), such as C(+365d) or C(+32w1d2h)).
+ - Time will always be interpreted as UTC.
+ - Note that only the date (day, month, year) is supported for specifying the expiry date of the issued certificate.
+ - The full date-time is adjusted to EST (GMT -5:00) before issuance, which may result in a certificate with an expiration date one day
+ earlier than expected if a relative time is used.
+ - The minimum certificate lifetime is 90 days, and the maximum is three years.
+ - If this value is not specified, the certificate will stop being valid 365 days from the date of issue.
+ - This is only used by the C(entrust) provider.
+ type: str
+ default: +365d
+ version_added: "2.9"
+
+ entrust_api_specification_path:
+ description:
+ - The path to the specification file defining the Entrust Certificate Services (ECS) API configuration.
+ - You can use this to keep a local copy of the specification to avoid downloading it every time the module is used.
+ - This is only used by the C(entrust) provider.
+ type: path
+ default: https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml
+ version_added: "2.9"
+
+ return_content:
+ description:
+ - If set to C(yes), will return the (current or generated) certificate's content as I(certificate).
+ type: bool
+ default: no
+ version_added: "2.10"
+
+extends_documentation_fragment: files
+notes:
+ - All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
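+ - For example, C(20190413202428Z) denotes 20:24:28 UTC on April 13, 2019.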
+ - Dates specified should be UTC. Minutes and seconds are mandatory.
+ - For security reasons, when you use the C(ownca) provider, you should NOT run M(openssl_certificate) on
+ a target machine, but on a dedicated CA machine. It is recommended not to store the CA private key
+ on the target machine. Once signed, the certificate can be moved to the target machine.
+seealso:
+- module: openssl_csr
+- module: openssl_dhparam
+- module: openssl_pkcs12
+- module: openssl_privatekey
+- module: openssl_publickey
+'''
+
+EXAMPLES = r'''
+- name: Generate a Self Signed OpenSSL certificate
+ openssl_certificate:
+ path: /etc/ssl/crt/ansible.com.crt
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ csr_path: /etc/ssl/csr/ansible.com.csr
+ provider: selfsigned
+
+- name: Generate an OpenSSL certificate signed with your own CA certificate
+ openssl_certificate:
+ path: /etc/ssl/crt/ansible.com.crt
+ csr_path: /etc/ssl/csr/ansible.com.csr
+ ownca_path: /etc/ssl/crt/ansible_CA.crt
+ ownca_privatekey_path: /etc/ssl/private/ansible_CA.pem
+ provider: ownca
+
+- name: Generate a Let's Encrypt Certificate
+ openssl_certificate:
+ path: /etc/ssl/crt/ansible.com.crt
+ csr_path: /etc/ssl/csr/ansible.com.csr
+ provider: acme
+ acme_accountkey_path: /etc/ssl/private/ansible.com.pem
+ acme_challenge_path: /etc/ssl/challenges/ansible.com/
+
+- name: Force (re-)generate a new Let's Encrypt Certificate
+ openssl_certificate:
+ path: /etc/ssl/crt/ansible.com.crt
+ csr_path: /etc/ssl/csr/ansible.com.csr
+ provider: acme
+ acme_accountkey_path: /etc/ssl/private/ansible.com.pem
+ acme_challenge_path: /etc/ssl/challenges/ansible.com/
+ force: yes
+
+- name: Generate an Entrust certificate via the Entrust Certificate Services (ECS) API
+ openssl_certificate:
+ path: /etc/ssl/crt/ansible.com.crt
+ csr_path: /etc/ssl/csr/ansible.com.csr
+ provider: entrust
+ entrust_requester_name: Jo Doe
+ entrust_requester_email: jdoe@ansible.com
+ entrust_requester_phone: 555-555-5555
+ entrust_cert_type: STANDARD_SSL
+ entrust_api_user: apiusername
+ entrust_api_key: a^lv*32!cd9LnT
+ entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt
+ entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-key.crt
+ entrust_api_specification_path: /etc/ssl/entrust/api-docs/cms-api-2.1.0.yaml
+
+# The following example shows one assertonly usage using all existing options for
+# assertonly, and shows how to emulate the behavior with the openssl_certificate_info,
+# openssl_csr_info, openssl_privatekey_info and assert modules:
+
+- openssl_certificate:
+ provider: assertonly
+ path: /etc/ssl/crt/ansible.com.crt
+ csr_path: /etc/ssl/csr/ansible.com.csr
+ privatekey_path: /etc/ssl/csr/ansible.com.key
+ signature_algorithms:
+ - sha256WithRSAEncryption
+ - sha512WithRSAEncryption
+ subject:
+ commonName: ansible.com
+ subject_strict: yes
+ issuer:
+ commonName: ansible.com
+ issuer_strict: yes
+ has_expired: no
+ version: 3
+ key_usage:
+ - Data Encipherment
+ key_usage_strict: yes
+ extended_key_usage:
+ - DVCS
+ extended_key_usage_strict: yes
+ subject_alt_name:
+ - dns:ansible.com
+ subject_alt_name_strict: yes
+ not_before: 20190331202428Z
+ not_after: 20190413202428Z
+ valid_at: "+1d10h"
+ invalid_at: 20200331202428Z
+ valid_in: 10 # in ten seconds
+
+- openssl_certificate_info:
+ path: /etc/ssl/crt/ansible.com.crt
+ # for valid_at, invalid_at and valid_in
+ valid_at:
+ one_day_ten_hours: "+1d10h"
+ fixed_timestamp: 20200331202428Z
+ ten_seconds: "+10"
+ register: result
+
+- openssl_csr_info:
+ # Verifies that the CSR signature is valid; module will fail if not
+ path: /etc/ssl/csr/ansible.com.csr
+ register: result_csr
+
+- openssl_privatekey_info:
+ path: /etc/ssl/csr/ansible.com.key
+ register: result_privatekey
+
+- assert:
+ that:
+ # When private key is specified for assertonly, this will be checked:
+ - result.public_key == result_privatekey.public_key
+ # When CSR is specified for assertonly, this will be checked:
+ - result.public_key == result_csr.public_key
+ - result.subject_ordered == result_csr.subject_ordered
+ - result.extensions_by_oid == result_csr.extensions_by_oid
+ # signature_algorithms check
+ - "result.signature_algorithm == 'sha256WithRSAEncryption' or result.signature_algorithm == 'sha512WithRSAEncryption'"
+ # subject and subject_strict
+ - "result.subject.commonName == 'ansible.com'"
+ - "result.subject | length == 1" # the number must be the number of entries you check for
+ # issuer and issuer_strict
+ - "result.issuer.commonName == 'ansible.com'"
+ - "result.issuer | length == 1" # the number must be the number of entries you check for
+ # has_expired
+ - not result.expired
+ # version
+ - result.version == 3
+ # key_usage and key_usage_strict
+ - "'Data Encipherment' in result.key_usage"
+ - "result.key_usage | length == 1" # the number must be the number of entries you check for
+ # extended_key_usage and extended_key_usage_strict
+ - "'DVCS' in result.extended_key_usage"
+ - "result.extended_key_usage | length == 1" # the number must be the number of entries you check for
+ # subject_alt_name and subject_alt_name_strict
+ - "'dns:ansible.com' in result.subject_alt_name"
+ - "result.subject_alt_name | length == 1" # the number must be the number of entries you check for
+ # not_before and not_after
+ - "result.not_before == '20190331202428Z'"
+ - "result.not_after == '20190413202428Z'"
+ # valid_at, invalid_at and valid_in
+ - "result.valid_at.one_day_ten_hours" # for valid_at
+ - "not result.valid_at.fixed_timestamp" # for invalid_at
+ - "result.valid_at.ten_seconds" # for valid_in
+
+# Examples for some checks one could use the assertonly provider for:
+# (Please note that assertonly has been deprecated!)
+
+# How to use the assertonly provider to implement and trigger your own custom certificate generation workflow:
+- name: Check if a certificate is currently still valid, ignoring failures
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ has_expired: no
+ ignore_errors: yes
+ register: validity_check
+
+- name: Run custom task(s) to get a new, valid certificate in case the initial check failed
+ command: superspecialSSL recreate /etc/ssl/crt/example.com.crt
+ when: validity_check.failed
+
+- name: Check the new certificate again for validity with the same parameters, this time failing the play if it is still invalid
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ has_expired: no
+ when: validity_check.failed
+
+# Some other checks that assertonly could be used for:
+- name: Verify that an existing certificate was issued by the Let's Encrypt CA and is currently still valid
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ issuer:
+ O: Let's Encrypt
+ has_expired: no
+
+- name: Ensure that a certificate uses a modern signature algorithm (no SHA1, MD5 or DSA)
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ signature_algorithms:
+ - sha224WithRSAEncryption
+ - sha256WithRSAEncryption
+ - sha384WithRSAEncryption
+ - sha512WithRSAEncryption
+ - sha224WithECDSAEncryption
+ - sha256WithECDSAEncryption
+ - sha384WithECDSAEncryption
+ - sha512WithECDSAEncryption
+
+- name: Ensure that the existing certificate belongs to the specified private key
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ privatekey_path: /etc/ssl/private/example.com.pem
+ provider: assertonly
+
+- name: Ensure that the existing certificate is still valid at the winter solstice 2017
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ valid_at: 20171221162800Z
+
+- name: Ensure that the existing certificate is still valid 2 weeks (1209600 seconds) from now
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ valid_in: 1209600
+
+- name: Ensure that the existing certificate is only used for digital signatures and encrypting other keys
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ key_usage:
+ - digitalSignature
+ - keyEncipherment
+ key_usage_strict: true
+
+- name: Ensure that the existing certificate can be used for client authentication
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ extended_key_usage:
+ - clientAuth
+
+- name: Ensure that the existing certificate can only be used for client authentication and time stamping
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ extended_key_usage:
+ - clientAuth
+ - 1.3.6.1.5.5.7.3.8
+ extended_key_usage_strict: true
+
+- name: Ensure that the existing certificate has a certain domain in its subjectAltName
+ openssl_certificate:
+ path: /etc/ssl/crt/example.com.crt
+ provider: assertonly
+ subject_alt_name:
+ - www.example.com
+ - test.example.com
+'''
+
+RETURN = r'''
+filename:
+ description: Path to the generated certificate.
+ returned: changed or success
+ type: str
+ sample: /etc/ssl/crt/www.ansible.com.crt
+backup_file:
+ description: Name of backup file created.
+ returned: changed and if I(backup) is C(yes)
+ type: str
+ sample: /path/to/www.ansible.com.crt.2019-03-09@11:22~
+certificate:
+ description: The (current or generated) certificate's content.
+ returned: if I(state) is C(present) and I(return_content) is C(yes)
+ type: str
+ version_added: "2.10"
+'''
+
+
+from random import randint
+import abc
+import datetime
+import time
+import os
+import tempfile
+import traceback
+from distutils.version import LooseVersion
+
+from ansible.module_utils import crypto as crypto_utils
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes, to_text
+from ansible.module_utils.compat import ipaddress as compat_ipaddress
+from ansible.module_utils.ecs.api import ECSClient, RestOperationException, SessionConfigurationException
+
+MINIMAL_CRYPTOGRAPHY_VERSION = '1.6'
+MINIMAL_PYOPENSSL_VERSION = '0.15'
+
+PYOPENSSL_IMP_ERR = None
+try:
+ import OpenSSL
+ from OpenSSL import crypto
+ PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
+except ImportError:
+ PYOPENSSL_IMP_ERR = traceback.format_exc()
+ PYOPENSSL_FOUND = False
+else:
+ PYOPENSSL_FOUND = True
+
+CRYPTOGRAPHY_IMP_ERR = None
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives.serialization import Encoding
+ from cryptography.x509 import NameAttribute, Name
+ from cryptography.x509.oid import NameOID
+ CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
+except ImportError:
+ CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
+ CRYPTOGRAPHY_FOUND = False
+else:
+ CRYPTOGRAPHY_FOUND = True
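+
+# Typical consumption of the *_FOUND flags later in the module (a sketch of
+# the usual Ansible pattern, not the verbatim code): fail early when the
+# selected backend is not installed, e.g.:
+#   if not PYOPENSSL_FOUND:
+#       module.fail_json(msg=missing_required_lib('pyOpenSSL'),
+#                        exception=PYOPENSSL_IMP_ERR)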
+
+
+class CertificateError(crypto_utils.OpenSSLObjectError):
+ pass
+
+
+class Certificate(crypto_utils.OpenSSLObject):
+
+ def __init__(self, module, backend):
+ super(Certificate, self).__init__(
+ module.params['path'],
+ module.params['state'],
+ module.params['force'],
+ module.check_mode
+ )
+
+ self.provider = module.params['provider']
+ self.privatekey_path = module.params['privatekey_path']
+ self.privatekey_content = module.params['privatekey_content']
+ if self.privatekey_content is not None:
+ self.privatekey_content = self.privatekey_content.encode('utf-8')
+ self.privatekey_passphrase = module.params['privatekey_passphrase']
+ self.csr_path = module.params['csr_path']
+ self.csr_content = module.params['csr_content']
+ if self.csr_content is not None:
+ self.csr_content = self.csr_content.encode('utf-8')
+ self.cert = None
+ self.privatekey = None
+ self.csr = None
+ self.backend = backend
+ self.module = module
+ self.return_content = module.params['return_content']
+
+ # The following are default values which make sure check() works as
+ # before if providers do not explicitly change these properties.
+ self.create_subject_key_identifier = 'never_create'
+ self.create_authority_key_identifier = False
+
+ self.backup = module.params['backup']
+ self.backup_file = None
+
+ def _validate_privatekey(self):
+ if self.backend == 'pyopenssl':
+ ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
+ ctx.use_privatekey(self.privatekey)
+ ctx.use_certificate(self.cert)
+ try:
+ ctx.check_privatekey()
+ return True
+ except OpenSSL.SSL.Error:
+ return False
+ elif self.backend == 'cryptography':
+ return crypto_utils.cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
+
+ def _validate_csr(self):
+ if self.backend == 'pyopenssl':
+ # Verify that CSR is signed by certificate's private key
+ try:
+ self.csr.verify(self.cert.get_pubkey())
+ except OpenSSL.crypto.Error:
+ return False
+ # Check subject
+ if self.csr.get_subject() != self.cert.get_subject():
+ return False
+ # Check extensions
+ csr_extensions = self.csr.get_extensions()
+ cert_extension_count = self.cert.get_extension_count()
+ if len(csr_extensions) != cert_extension_count:
+ return False
+ for extension_number in range(0, cert_extension_count):
+ cert_extension = self.cert.get_extension(extension_number)
+ csr_extension = filter(lambda extension: extension.get_short_name() == cert_extension.get_short_name(), csr_extensions)
+ if cert_extension.get_data() != list(csr_extension)[0].get_data():
+ return False
+ return True
+ elif self.backend == 'cryptography':
+ # Verify that CSR is signed by certificate's private key
+ if not self.csr.is_signature_valid:
+ return False
+ if not crypto_utils.cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key()):
+ return False
+ # Check subject
+ if self.csr.subject != self.cert.subject:
+ return False
+ # Check extensions
+ cert_exts = list(self.cert.extensions)
+ csr_exts = list(self.csr.extensions)
+ if self.create_subject_key_identifier != 'never_create':
+ # Filter out SubjectKeyIdentifier extension before comparison
+ cert_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), cert_exts))
+ csr_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), csr_exts))
+ if self.create_authority_key_identifier:
+ # Filter out AuthorityKeyIdentifier extension before comparison
+ cert_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), cert_exts))
+ csr_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), csr_exts))
+ if len(cert_exts) != len(csr_exts):
+ return False
+ for cert_ext in cert_exts:
+ try:
+ csr_ext = self.csr.extensions.get_extension_for_oid(cert_ext.oid)
+ if cert_ext != csr_ext:
+ return False
+ except cryptography.x509.ExtensionNotFound as dummy:
+ return False
+ return True
+
+ def remove(self, module):
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ super(Certificate, self).remove(module)
+
+ def check(self, module, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ state_and_perms = super(Certificate, self).check(module, perms_required)
+
+ if not state_and_perms:
+ return False
+
+ try:
+ self.cert = crypto_utils.load_certificate(self.path, backend=self.backend)
+ except Exception as dummy:
+ return False
+
+ if self.privatekey_path or self.privatekey_content:
+ try:
+ self.privatekey = crypto_utils.load_privatekey(
+ path=self.privatekey_path,
+ content=self.privatekey_content,
+ passphrase=self.privatekey_passphrase,
+ backend=self.backend
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ raise CertificateError(exc)
+ if not self._validate_privatekey():
+ return False
+
+ if self.csr_path or self.csr_content:
+ self.csr = crypto_utils.load_certificate_request(
+ path=self.csr_path,
+ content=self.csr_content,
+ backend=self.backend
+ )
+ if not self._validate_csr():
+ return False
+
+ # Check SubjectKeyIdentifier
+ if self.backend == 'cryptography' and self.create_subject_key_identifier != 'never_create':
+ # Get hold of certificate's SKI
+ try:
+ ext = self.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
+ except cryptography.x509.ExtensionNotFound as dummy:
+ return False
+ # Get hold of CSR's SKI for 'create_if_not_provided'
+ csr_ext = None
+ if self.create_subject_key_identifier == 'create_if_not_provided':
+ try:
+ csr_ext = self.csr.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
+ except cryptography.x509.ExtensionNotFound as dummy:
+ pass
+ if csr_ext is None:
+ # If CSR had no SKI, or we chose to ignore it ('always_create'), compare with created SKI
+ if ext.value.digest != x509.SubjectKeyIdentifier.from_public_key(self.cert.public_key()).digest:
+ return False
+ else:
+ # If CSR had SKI and we didn't ignore it ('create_if_not_provided'), compare SKIs
+ if ext.value.digest != csr_ext.value.digest:
+ return False
+
+ return True
+
+
+class CertificateAbsent(Certificate):
+ def __init__(self, module):
+ super(CertificateAbsent, self).__init__(module, 'cryptography') # backend doesn't matter
+
+ def generate(self, module):
+ pass
+
+ def dump(self, check_mode=False):
+ # Only used when state is 'absent'
+
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'csr': self.csr_path
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ result['certificate'] = None
+
+ return result
+
+
+class SelfSignedCertificateCryptography(Certificate):
+ """Generate the self-signed certificate, using the cryptography backend"""
+ def __init__(self, module):
+ super(SelfSignedCertificateCryptography, self).__init__(module, 'cryptography')
+ self.create_subject_key_identifier = module.params['selfsigned_create_subject_key_identifier']
+ self.notBefore = crypto_utils.get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
+ self.notAfter = crypto_utils.get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
+ self.digest = crypto_utils.select_message_digest(module.params['selfsigned_digest'])
+ self.version = module.params['selfsigned_version']
+ self.serial_number = x509.random_serial_number()
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file {0} does not exist'.format(self.csr_path)
+ )
+ if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
+ raise CertificateError(
+ 'The private key file {0} does not exist'.format(self.privatekey_path)
+ )
+
+ self.csr = crypto_utils.load_certificate_request(
+ path=self.csr_path,
+ content=self.csr_content,
+ backend=self.backend
+ )
+ self._module = module
+
+ try:
+ self.privatekey = crypto_utils.load_privatekey(
+ path=self.privatekey_path,
+ content=self.privatekey_content,
+ passphrase=self.privatekey_passphrase,
+ backend=self.backend
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ module.fail_json(msg=to_native(exc))
+
+ if crypto_utils.cryptography_key_needs_digest_for_signing(self.privatekey):
+ if self.digest is None:
+ raise CertificateError(
+ 'The digest %s is not supported with the cryptography backend' % module.params['selfsigned_digest']
+ )
+ else:
+ self.digest = None
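+ # Ed25519 and Ed448 keys sign without a separate digest algorithm,
+ # which is why no digest is selected for them here.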
+
+ def generate(self, module):
+ if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
+ raise CertificateError(
+ 'The private key %s does not exist' % self.privatekey_path
+ )
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file %s does not exist' % self.csr_path
+ )
+ if not self.check(module, perms_required=False) or self.force:
+ try:
+ cert_builder = x509.CertificateBuilder()
+ cert_builder = cert_builder.subject_name(self.csr.subject)
+ cert_builder = cert_builder.issuer_name(self.csr.subject)
+ cert_builder = cert_builder.serial_number(self.serial_number)
+ cert_builder = cert_builder.not_valid_before(self.notBefore)
+ cert_builder = cert_builder.not_valid_after(self.notAfter)
+ cert_builder = cert_builder.public_key(self.privatekey.public_key())
+ has_ski = False
+ for extension in self.csr.extensions:
+ if isinstance(extension.value, x509.SubjectKeyIdentifier):
+ if self.create_subject_key_identifier == 'always_create':
+ continue
+ has_ski = True
+ cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
+ if not has_ski and self.create_subject_key_identifier != 'never_create':
+ cert_builder = cert_builder.add_extension(
+ x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
+ critical=False
+ )
+ except ValueError as e:
+ raise CertificateError(str(e))
+
+ try:
+ certificate = cert_builder.sign(
+ private_key=self.privatekey, algorithm=self.digest,
+ backend=default_backend()
+ )
+ except TypeError as e:
+ if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
+ module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
+ raise
+
+ self.cert = certificate
+
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ crypto_utils.write_file(module, certificate.public_bytes(Encoding.PEM))
+ self.changed = True
+ else:
+ self.cert = crypto_utils.load_certificate(self.path, backend=self.backend)
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ def dump(self, check_mode=False):
+
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'csr': self.csr_path
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ content = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ result['certificate'] = content.decode('utf-8') if content else None
+
+ if check_mode:
+ result.update({
+ 'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
+ 'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
+ 'serial_number': self.serial_number,
+ })
+ else:
+ result.update({
+ 'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
+ 'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
+ 'serial_number': self.cert.serial_number,
+ })
+
+ return result
+
+
+class SelfSignedCertificate(Certificate):
+ """Generate the self-signed certificate."""
+
+ def __init__(self, module):
+ super(SelfSignedCertificate, self).__init__(module, 'pyopenssl')
+ if module.params['selfsigned_create_subject_key_identifier'] != 'create_if_not_provided':
+ module.fail_json(msg='selfsigned_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
+ self.notBefore = crypto_utils.get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
+ self.notAfter = crypto_utils.get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
+ self.digest = module.params['selfsigned_digest']
+ self.version = module.params['selfsigned_version']
+ self.serial_number = randint(1000, 99999)
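+ # Note: unlike the cryptography backend, which uses x509.random_serial_number(),
+ # the pyOpenSSL backend picks a small random serial number here.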
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file {0} does not exist'.format(self.csr_path)
+ )
+ if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
+ raise CertificateError(
+ 'The private key file {0} does not exist'.format(self.privatekey_path)
+ )
+
+ self.csr = crypto_utils.load_certificate_request(
+ path=self.csr_path,
+ content=self.csr_content,
+ )
+ try:
+ self.privatekey = crypto_utils.load_privatekey(
+ path=self.privatekey_path,
+ content=self.privatekey_content,
+ passphrase=self.privatekey_passphrase,
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ module.fail_json(msg=str(exc))
+
+ def generate(self, module):
+
+ if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
+ raise CertificateError(
+ 'The private key %s does not exist' % self.privatekey_path
+ )
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file %s does not exist' % self.csr_path
+ )
+
+ if not self.check(module, perms_required=False) or self.force:
+ cert = crypto.X509()
+ cert.set_serial_number(self.serial_number)
+ cert.set_notBefore(to_bytes(self.notBefore))
+ cert.set_notAfter(to_bytes(self.notAfter))
+ cert.set_subject(self.csr.get_subject())
+ cert.set_issuer(self.csr.get_subject())
+ cert.set_version(self.version - 1)
+ cert.set_pubkey(self.csr.get_pubkey())
+ cert.add_extensions(self.csr.get_extensions())
+
+ cert.sign(self.privatekey, self.digest)
+ self.cert = cert
+
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ crypto_utils.write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
+ self.changed = True
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ def dump(self, check_mode=False):
+
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'csr': self.csr_path
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ content = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ result['certificate'] = content.decode('utf-8') if content else None
+
+ if check_mode:
+ result.update({
+ 'notBefore': self.notBefore,
+ 'notAfter': self.notAfter,
+ 'serial_number': self.serial_number,
+ })
+ else:
+ result.update({
+ 'notBefore': self.cert.get_notBefore(),
+ 'notAfter': self.cert.get_notAfter(),
+ 'serial_number': self.cert.get_serial_number(),
+ })
+
+ return result
+
+
+class OwnCACertificateCryptography(Certificate):
+ """Generate the own CA certificate. Using the cryptography backend"""
+ def __init__(self, module):
+ super(OwnCACertificateCryptography, self).__init__(module, 'cryptography')
+ self.create_subject_key_identifier = module.params['ownca_create_subject_key_identifier']
+ self.create_authority_key_identifier = module.params['ownca_create_authority_key_identifier']
+ self.notBefore = crypto_utils.get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
+ self.notAfter = crypto_utils.get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
+ self.digest = crypto_utils.select_message_digest(module.params['ownca_digest'])
+ self.version = module.params['ownca_version']
+ self.serial_number = x509.random_serial_number()
+ self.ca_cert_path = module.params['ownca_path']
+ self.ca_cert_content = module.params['ownca_content']
+ if self.ca_cert_content is not None:
+ self.ca_cert_content = self.ca_cert_content.encode('utf-8')
+ self.ca_privatekey_path = module.params['ownca_privatekey_path']
+ self.ca_privatekey_content = module.params['ownca_privatekey_content']
+ if self.ca_privatekey_content is not None:
+ self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
+ self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file {0} does not exist'.format(self.csr_path)
+ )
+ if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
+ raise CertificateError(
+ 'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
+ )
+ if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
+ raise CertificateError(
+ 'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
+ )
+
+ self.csr = crypto_utils.load_certificate_request(
+ path=self.csr_path,
+ content=self.csr_content,
+ backend=self.backend
+ )
+ self.ca_cert = crypto_utils.load_certificate(
+ path=self.ca_cert_path,
+ content=self.ca_cert_content,
+ backend=self.backend
+ )
+ try:
+ self.ca_private_key = crypto_utils.load_privatekey(
+ path=self.ca_privatekey_path,
+ content=self.ca_privatekey_content,
+ passphrase=self.ca_privatekey_passphrase,
+ backend=self.backend
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ module.fail_json(msg=str(exc))
+
+ if crypto_utils.cryptography_key_needs_digest_for_signing(self.ca_private_key):
+ if self.digest is None:
+ raise CertificateError(
+ 'The digest %s is not supported with the cryptography backend' % module.params['ownca_digest']
+ )
+ else:
+ self.digest = None
+
+ def generate(self, module):
+
+ if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
+ raise CertificateError(
+ 'The CA certificate %s does not exist' % self.ca_cert_path
+ )
+
+ if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
+ raise CertificateError(
+ 'The CA private key %s does not exist' % self.ca_privatekey_path
+ )
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file %s does not exist' % self.csr_path
+ )
+
+ if not self.check(module, perms_required=False) or self.force:
+ cert_builder = x509.CertificateBuilder()
+ cert_builder = cert_builder.subject_name(self.csr.subject)
+ cert_builder = cert_builder.issuer_name(self.ca_cert.subject)
+ cert_builder = cert_builder.serial_number(self.serial_number)
+ cert_builder = cert_builder.not_valid_before(self.notBefore)
+ cert_builder = cert_builder.not_valid_after(self.notAfter)
+ cert_builder = cert_builder.public_key(self.csr.public_key())
+ has_ski = False
+ for extension in self.csr.extensions:
+ if isinstance(extension.value, x509.SubjectKeyIdentifier):
+ if self.create_subject_key_identifier == 'always_create':
+ continue
+ has_ski = True
+ if self.create_authority_key_identifier and isinstance(extension.value, x509.AuthorityKeyIdentifier):
+ continue
+ cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
+ if not has_ski and self.create_subject_key_identifier != 'never_create':
+ cert_builder = cert_builder.add_extension(
+ x509.SubjectKeyIdentifier.from_public_key(self.csr.public_key()),
+ critical=False
+ )
+ if self.create_authority_key_identifier:
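+ # cryptography >= 2.7 expects the SubjectKeyIdentifier value here,
+ # while older releases expect the whole Extension object.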
+ try:
+ ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
+ cert_builder = cert_builder.add_extension(
+ x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
+ if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
+ x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext),
+ critical=False
+ )
+ except cryptography.x509.ExtensionNotFound:
+ cert_builder = cert_builder.add_extension(
+ x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key()),
+ critical=False
+ )
+
+ try:
+ certificate = cert_builder.sign(
+ private_key=self.ca_private_key, algorithm=self.digest,
+ backend=default_backend()
+ )
+ except TypeError as e:
+ if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
+ module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
+ raise
+
+ self.cert = certificate
+
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ crypto_utils.write_file(module, certificate.public_bytes(Encoding.PEM))
+ self.changed = True
+ else:
+ self.cert = crypto_utils.load_certificate(self.path, backend=self.backend)
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ def check(self, module, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ if not super(OwnCACertificateCryptography, self).check(module, perms_required):
+ return False
+
+ # Check AuthorityKeyIdentifier
+ if self.create_authority_key_identifier:
+ try:
+ ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
+ expected_ext = (
+ x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
+ if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
+ x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext)
+ )
+ except cryptography.x509.ExtensionNotFound:
+ expected_ext = x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key())
+ try:
+ ext = self.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier)
+ if ext.value != expected_ext:
+ return False
+ except cryptography.x509.ExtensionNotFound as dummy:
+ return False
+
+ return True
+
+ def dump(self, check_mode=False):
+
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'csr': self.csr_path,
+ 'ca_cert': self.ca_cert_path,
+ 'ca_privatekey': self.ca_privatekey_path
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ content = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ result['certificate'] = content.decode('utf-8') if content else None
+
+ if check_mode:
+ result.update({
+ 'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
+ 'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
+ 'serial_number': self.serial_number,
+ })
+ else:
+ result.update({
+ 'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
+ 'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
+ 'serial_number': self.cert.serial_number,
+ })
+
+ return result
+
+
+class OwnCACertificate(Certificate):
+ """Generate the own CA certificate."""
+
+ def __init__(self, module):
+ super(OwnCACertificate, self).__init__(module, 'pyopenssl')
+ self.notBefore = crypto_utils.get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
+ self.notAfter = crypto_utils.get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
+ self.digest = module.params['ownca_digest']
+ self.version = module.params['ownca_version']
+ self.serial_number = randint(1000, 99999)
+ if module.params['ownca_create_subject_key_identifier'] != 'create_if_not_provided':
+ module.fail_json(msg='ownca_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
+ if module.params['ownca_create_authority_key_identifier']:
+ module.warn('ownca_create_authority_key_identifier is ignored by the pyOpenSSL backend!')
+ self.ca_cert_path = module.params['ownca_path']
+ self.ca_cert_content = module.params['ownca_content']
+ if self.ca_cert_content is not None:
+ self.ca_cert_content = self.ca_cert_content.encode('utf-8')
+ self.ca_privatekey_path = module.params['ownca_privatekey_path']
+ self.ca_privatekey_content = module.params['ownca_privatekey_content']
+ if self.ca_privatekey_content is not None:
+ self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
+ self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file {0} does not exist'.format(self.csr_path)
+ )
+ if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
+ raise CertificateError(
+ 'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
+ )
+ if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
+ raise CertificateError(
+ 'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
+ )
+
+ self.csr = crypto_utils.load_certificate_request(
+ path=self.csr_path,
+ content=self.csr_content,
+ )
+ self.ca_cert = crypto_utils.load_certificate(
+ path=self.ca_cert_path,
+ content=self.ca_cert_content,
+ )
+ try:
+ self.ca_privatekey = crypto_utils.load_privatekey(
+ path=self.ca_privatekey_path,
+ content=self.ca_privatekey_content,
+ passphrase=self.ca_privatekey_passphrase
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ module.fail_json(msg=str(exc))
+
+ def generate(self, module):
+
+ if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
+ raise CertificateError(
+ 'The CA certificate %s does not exist' % self.ca_cert_path
+ )
+
+ if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
+ raise CertificateError(
+ 'The CA private key %s does not exist' % self.ca_privatekey_path
+ )
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file %s does not exist' % self.csr_path
+ )
+
+ if not self.check(module, perms_required=False) or self.force:
+ cert = crypto.X509()
+ cert.set_serial_number(self.serial_number)
+ cert.set_notBefore(to_bytes(self.notBefore))
+ cert.set_notAfter(to_bytes(self.notAfter))
+ cert.set_subject(self.csr.get_subject())
+ cert.set_issuer(self.ca_cert.get_subject())
+ cert.set_version(self.version - 1)
+ cert.set_pubkey(self.csr.get_pubkey())
+ cert.add_extensions(self.csr.get_extensions())
+
+ cert.sign(self.ca_privatekey, self.digest)
+ self.cert = cert
+
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ crypto_utils.write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
+ self.changed = True
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ def dump(self, check_mode=False):
+
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'csr': self.csr_path,
+ 'ca_cert': self.ca_cert_path,
+ 'ca_privatekey': self.ca_privatekey_path
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ content = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ result['certificate'] = content.decode('utf-8') if content else None
+
+ if check_mode:
+ result.update({
+ 'notBefore': self.notBefore,
+ 'notAfter': self.notAfter,
+ 'serial_number': self.serial_number,
+ })
+ else:
+ result.update({
+ 'notBefore': self.cert.get_notBefore(),
+ 'notAfter': self.cert.get_notAfter(),
+ 'serial_number': self.cert.get_serial_number(),
+ })
+
+ return result
+
+
+def compare_sets(subset, superset, equality=False):
+ if equality:
+ return set(subset) == set(superset)
+ else:
+ return all(x in superset for x in subset)
+
+
+def compare_dicts(subset, superset, equality=False):
+ if equality:
+ return subset == superset
+ else:
+ return all(superset.get(x) == v for x, v in subset.items())
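+
+
+# Usage sketch for the two helpers above (illustrative values):
+#   compare_sets(['a'], ['a', 'b'])                  -> True  (subset check)
+#   compare_sets(['a'], ['a', 'b'], equality=True)   -> False (exact match required)
+#   compare_dicts({'x': 1}, {'x': 1, 'y': 2})        -> True  (subset of items)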
+
+
+NO_EXTENSION = 'no extension'
+
+
+class AssertOnlyCertificateBase(Certificate):
+
+ def __init__(self, module, backend):
+ super(AssertOnlyCertificateBase, self).__init__(module, backend)
+
+ self.signature_algorithms = module.params['signature_algorithms']
+ if module.params['subject']:
+ self.subject = crypto_utils.parse_name_field(module.params['subject'])
+ else:
+ self.subject = []
+ self.subject_strict = module.params['subject_strict']
+ if module.params['issuer']:
+ self.issuer = crypto_utils.parse_name_field(module.params['issuer'])
+ else:
+ self.issuer = []
+ self.issuer_strict = module.params['issuer_strict']
+ self.has_expired = module.params['has_expired']
+ self.version = module.params['version']
+ self.key_usage = module.params['key_usage']
+ self.key_usage_strict = module.params['key_usage_strict']
+ self.extended_key_usage = module.params['extended_key_usage']
+ self.extended_key_usage_strict = module.params['extended_key_usage_strict']
+ self.subject_alt_name = module.params['subject_alt_name']
+ self.subject_alt_name_strict = module.params['subject_alt_name_strict']
+ self.not_before = module.params['not_before']
+ self.not_after = module.params['not_after']
+ self.valid_at = module.params['valid_at']
+ self.invalid_at = module.params['invalid_at']
+ self.valid_in = module.params['valid_in']
+ if self.valid_in and not self.valid_in.startswith("+") and not self.valid_in.startswith("-"):
+ try:
+ int(self.valid_in)
+ except ValueError:
+ module.fail_json(msg='The supplied value for "valid_in" (%s) is not an integer or a valid timespec' % self.valid_in)
+ self.valid_in = "+" + self.valid_in + "s"
+
+ # Load objects
+ self.cert = crypto_utils.load_certificate(self.path, backend=self.backend)
+ if self.privatekey_path is not None or self.privatekey_content is not None:
+ try:
+ self.privatekey = crypto_utils.load_privatekey(
+ path=self.privatekey_path,
+ content=self.privatekey_content,
+ passphrase=self.privatekey_passphrase,
+ backend=self.backend
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ raise CertificateError(exc)
+ if self.csr_path is not None or self.csr_content is not None:
+ self.csr = crypto_utils.load_certificate_request(
+ path=self.csr_path,
+ content=self.csr_content,
+ backend=self.backend
+ )
+
+ @abc.abstractmethod
+ def _validate_privatekey(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_csr_signature(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_csr_subject(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_csr_extensions(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_signature_algorithms(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_subject(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_issuer(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_has_expired(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_version(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_key_usage(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_extended_key_usage(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_subject_alt_name(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_not_before(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_not_after(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_valid_at(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_invalid_at(self):
+ pass
+
+ @abc.abstractmethod
+ def _validate_valid_in(self):
+ pass
+
+ def assertonly(self, module):
+ messages = []
+ if self.privatekey_path is not None or self.privatekey_content is not None:
+ if not self._validate_privatekey():
+ messages.append(
+ 'Certificate %s and private key %s do not match' %
+ (self.path, self.privatekey_path or '(provided in module options)')
+ )
+
+ if self.csr_path is not None or self.csr_content is not None:
+ if not self._validate_csr_signature():
+ messages.append(
+ 'Certificate %s and CSR %s do not match: private key mismatch' %
+ (self.path, self.csr_path or '(provided in module options)')
+ )
+ if not self._validate_csr_subject():
+ messages.append(
+ 'Certificate %s and CSR %s do not match: subject mismatch' %
+ (self.path, self.csr_path or '(provided in module options)')
+ )
+ if not self._validate_csr_extensions():
+ messages.append(
+ 'Certificate %s and CSR %s do not match: extensions mismatch' %
+ (self.path, self.csr_path or '(provided in module options)')
+ )
+
+ if self.signature_algorithms is not None:
+ wrong_alg = self._validate_signature_algorithms()
+ if wrong_alg:
+ messages.append(
+ 'Invalid signature algorithm (got %s, expected one of %s)' %
+ (wrong_alg, self.signature_algorithms)
+ )
+
+ if self.subject is not None:
+ failure = self._validate_subject()
+ if failure:
+ dummy, cert_subject = failure
+ messages.append(
+ 'Invalid subject component (got %s, expected all of %s to be present)' %
+ (cert_subject, self.subject)
+ )
+
+ if self.issuer is not None:
+ failure = self._validate_issuer()
+ if failure:
+ dummy, cert_issuer = failure
+ messages.append(
+ 'Invalid issuer component (got %s, expected all of %s to be present)' % (cert_issuer, self.issuer)
+ )
+
+ if self.has_expired is not None:
+ cert_expired = self._validate_has_expired()
+ if cert_expired != self.has_expired:
+ messages.append(
+ 'Certificate expiration check failed (certificate expiration is %s, expected %s)' %
+ (cert_expired, self.has_expired)
+ )
+
+ if self.version is not None:
+ cert_version = self._validate_version()
+ if cert_version != self.version:
+ messages.append(
+ 'Invalid certificate version number (got %s, expected %s)' %
+ (cert_version, self.version)
+ )
+
+ if self.key_usage is not None:
+ failure = self._validate_key_usage()
+ if failure == NO_EXTENSION:
+ messages.append('Found no keyUsage extension')
+ elif failure:
+ dummy, cert_key_usage = failure
+ messages.append(
+ 'Invalid keyUsage components (got %s, expected all of %s to be present)' %
+ (cert_key_usage, self.key_usage)
+ )
+
+ if self.extended_key_usage is not None:
+ failure = self._validate_extended_key_usage()
+ if failure == NO_EXTENSION:
+ messages.append('Found no extendedKeyUsage extension')
+ elif failure:
+ dummy, ext_cert_key_usage = failure
+ messages.append(
+ 'Invalid extendedKeyUsage component (got %s, expected all of %s to be present)' % (ext_cert_key_usage, self.extended_key_usage)
+ )
+
+ if self.subject_alt_name is not None:
+ failure = self._validate_subject_alt_name()
+ if failure == NO_EXTENSION:
+ messages.append('Found no subjectAltName extension')
+ elif failure:
+ dummy, cert_san = failure
+ messages.append(
+ 'Invalid subjectAltName component (got %s, expected all of %s to be present)' %
+ (cert_san, self.subject_alt_name)
+ )
+
+ if self.not_before is not None:
+ cert_not_valid_before = self._validate_not_before()
+ if cert_not_valid_before != crypto_utils.get_relative_time_option(self.not_before, 'not_before', backend=self.backend):
+ messages.append(
+ 'Invalid not_before component (got %s, expected %s to be present)' %
+ (cert_not_valid_before, self.not_before)
+ )
+
+ if self.not_after is not None:
+ cert_not_valid_after = self._validate_not_after()
+ if cert_not_valid_after != crypto_utils.get_relative_time_option(self.not_after, 'not_after', backend=self.backend):
+ messages.append(
+ 'Invalid not_after component (got %s, expected %s to be present)' %
+ (cert_not_valid_after, self.not_after)
+ )
+
+ if self.valid_at is not None:
+ not_before, valid_at, not_after = self._validate_valid_at()
+ if not (not_before <= valid_at <= not_after):
+ messages.append(
+ 'Certificate is not valid for the specified date (%s) - not_before: %s - not_after: %s' %
+ (self.valid_at, not_before, not_after)
+ )
+
+ if self.invalid_at is not None:
+ not_before, invalid_at, not_after = self._validate_invalid_at()
+ if not_before <= invalid_at <= not_after:
+ messages.append(
+ 'Certificate is not invalid for the specified date (%s) - not_before: %s - not_after: %s' %
+ (self.invalid_at, not_before, not_after)
+ )
+
+ if self.valid_in is not None:
+ not_before, valid_in, not_after = self._validate_valid_in()
+ if not not_before <= valid_in <= not_after:
+ messages.append(
+ 'Certificate is not valid in %s from now (that would be %s) - not_before: %s - not_after: %s' %
+ (self.valid_in, valid_in, not_before, not_after)
+ )
+ return messages
+
+ def generate(self, module):
+ """Don't generate anything - only assert"""
+ messages = self.assertonly(module)
+ if messages:
+ module.fail_json(msg=' | '.join(messages))
+
+ def check(self, module, perms_required=False):
+ """Ensure the resource is in its desired state."""
+ messages = self.assertonly(module)
+ return len(messages) == 0
+
+ def dump(self, check_mode=False):
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'csr': self.csr_path,
+ }
+ if self.return_content:
+ content = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ result['certificate'] = content.decode('utf-8') if content else None
+ return result
+
+
+class AssertOnlyCertificateCryptography(AssertOnlyCertificateBase):
+ """Validate the supplied cert, using the cryptography backend"""
+ def __init__(self, module):
+ super(AssertOnlyCertificateCryptography, self).__init__(module, 'cryptography')
+
+ def _validate_privatekey(self):
+ return crypto_utils.cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
+
+ def _validate_csr_signature(self):
+ if not self.csr.is_signature_valid:
+ return False
+ return crypto_utils.cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key())
+
+ def _validate_csr_subject(self):
+ return self.csr.subject == self.cert.subject
+
+ def _validate_csr_extensions(self):
+ cert_exts = self.cert.extensions
+ csr_exts = self.csr.extensions
+ if len(cert_exts) != len(csr_exts):
+ return False
+ for cert_ext in cert_exts:
+ try:
+ csr_ext = csr_exts.get_extension_for_oid(cert_ext.oid)
+ if cert_ext != csr_ext:
+ return False
+ except cryptography.x509.ExtensionNotFound as dummy:
+ return False
+ return True
+
+ def _validate_signature_algorithms(self):
+ if self.cert.signature_algorithm_oid._name not in self.signature_algorithms:
+ return self.cert.signature_algorithm_oid._name
+
+ def _validate_subject(self):
+ expected_subject = Name([NameAttribute(oid=crypto_utils.cryptography_name_to_oid(sub[0]), value=to_text(sub[1]))
+ for sub in self.subject])
+ cert_subject = self.cert.subject
+ if not compare_sets(expected_subject, cert_subject, self.subject_strict):
+ return expected_subject, cert_subject
+
+ def _validate_issuer(self):
+ expected_issuer = Name([NameAttribute(oid=crypto_utils.cryptography_name_to_oid(iss[0]), value=to_text(iss[1]))
+ for iss in self.issuer])
+ cert_issuer = self.cert.issuer
+ if not compare_sets(expected_issuer, cert_issuer, self.issuer_strict):
+ return self.issuer, cert_issuer
+
+ def _validate_has_expired(self):
+ cert_not_after = self.cert.not_valid_after
+ cert_expired = cert_not_after < datetime.datetime.utcnow()
+ return cert_expired
+
+ def _validate_version(self):
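+ # cryptography's x509.Version enum only defines v1 and v3, so any other
+ # value is unexpected and reported as "unknown".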
+ if self.cert.version == x509.Version.v1:
+ return 1
+ if self.cert.version == x509.Version.v3:
+ return 3
+ return "unknown"
+
+ def _validate_key_usage(self):
+ try:
+ current_key_usage = self.cert.extensions.get_extension_for_class(x509.KeyUsage).value
+ test_key_usage = dict(
+ digital_signature=current_key_usage.digital_signature,
+ content_commitment=current_key_usage.content_commitment,
+ key_encipherment=current_key_usage.key_encipherment,
+ data_encipherment=current_key_usage.data_encipherment,
+ key_agreement=current_key_usage.key_agreement,
+ key_cert_sign=current_key_usage.key_cert_sign,
+ crl_sign=current_key_usage.crl_sign,
+ encipher_only=False,
+ decipher_only=False
+ )
+ if test_key_usage['key_agreement']:
+ test_key_usage.update(dict(
+ encipher_only=current_key_usage.encipher_only,
+ decipher_only=current_key_usage.decipher_only
+ ))
+
+ key_usages = crypto_utils.cryptography_parse_key_usage_params(self.key_usage)
+ if not compare_dicts(key_usages, test_key_usage, self.key_usage_strict):
+ return self.key_usage, [k for k, v in test_key_usage.items() if v is True]
+
+ except cryptography.x509.ExtensionNotFound:
+ # This is only bad if the user specified a non-empty list
+ if self.key_usage:
+ return NO_EXTENSION
+
+ def _validate_extended_key_usage(self):
+ try:
+ current_ext_keyusage = self.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
+ usages = [crypto_utils.cryptography_name_to_oid(usage) for usage in self.extended_key_usage]
+ expected_ext_keyusage = x509.ExtendedKeyUsage(usages)
+ if not compare_sets(expected_ext_keyusage, current_ext_keyusage, self.extended_key_usage_strict):
+ return [eku.value for eku in expected_ext_keyusage], [eku.value for eku in current_ext_keyusage]
+
+ except cryptography.x509.ExtensionNotFound:
+ # This is only bad if the user specified a non-empty list
+ if self.extended_key_usage:
+ return NO_EXTENSION
+
+ def _validate_subject_alt_name(self):
+ try:
+ current_san = self.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
+ expected_san = [crypto_utils.cryptography_get_name(san) for san in self.subject_alt_name]
+ if not compare_sets(expected_san, current_san, self.subject_alt_name_strict):
+ return self.subject_alt_name, current_san
+ except cryptography.x509.ExtensionNotFound:
+ # This is only bad if the user specified a non-empty list
+ if self.subject_alt_name:
+ return NO_EXTENSION
+
+ def _validate_not_before(self):
+ return self.cert.not_valid_before
+
+ def _validate_not_after(self):
+ return self.cert.not_valid_after
+
+ def _validate_valid_at(self):
+ rt = crypto_utils.get_relative_time_option(self.valid_at, 'valid_at', backend=self.backend)
+ return self.cert.not_valid_before, rt, self.cert.not_valid_after
+
+ def _validate_invalid_at(self):
+ rt = crypto_utils.get_relative_time_option(self.invalid_at, 'invalid_at', backend=self.backend)
+ return self.cert.not_valid_before, rt, self.cert.not_valid_after
+
+ def _validate_valid_in(self):
+ valid_in_date = crypto_utils.get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
+ return self.cert.not_valid_before, valid_in_date, self.cert.not_valid_after
+
+
+class AssertOnlyCertificate(AssertOnlyCertificateBase):
+ """validate the supplied certificate."""
+
+ def __init__(self, module):
+ super(AssertOnlyCertificate, self).__init__(module, 'pyopenssl')
+
+ # Ensure inputs are properly sanitized before comparison.
+ for param in ['signature_algorithms', 'key_usage', 'extended_key_usage',
+ 'subject_alt_name', 'subject', 'issuer', 'not_before',
+ 'not_after', 'valid_at', 'invalid_at']:
+ attr = getattr(self, param)
+ if isinstance(attr, list) and attr:
+ if isinstance(attr[0], str):
+ setattr(self, param, [to_bytes(item) for item in attr])
+ elif isinstance(attr[0], tuple):
+ setattr(self, param, [(to_bytes(item[0]), to_bytes(item[1])) for item in attr])
+ elif isinstance(attr, dict):
+ setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items()))
+ elif isinstance(attr, str):
+ setattr(self, param, to_bytes(attr))
+
+ def _validate_privatekey(self):
+ ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
+ ctx.use_privatekey(self.privatekey)
+ ctx.use_certificate(self.cert)
+ try:
+ ctx.check_privatekey()
+ return True
+ except OpenSSL.SSL.Error:
+ return False
+
+ def _validate_csr_signature(self):
+ try:
+ self.csr.verify(self.cert.get_pubkey())
+ except OpenSSL.crypto.Error:
+ return False
+ return True
+
+ def _validate_csr_subject(self):
+ if self.csr.get_subject() != self.cert.get_subject():
+ return False
+ return True
+
+ def _validate_csr_extensions(self):
+ csr_extensions = self.csr.get_extensions()
+ cert_extension_count = self.cert.get_extension_count()
+ if len(csr_extensions) != cert_extension_count:
+ return False
+ for extension_number in range(0, cert_extension_count):
+ cert_extension = self.cert.get_extension(extension_number)
+ csr_extension = filter(lambda extension: extension.get_short_name() == cert_extension.get_short_name(), csr_extensions)
+ if cert_extension.get_data() != list(csr_extension)[0].get_data():
+ return False
+ return True
+
+ def _validate_signature_algorithms(self):
+ if self.cert.get_signature_algorithm() not in self.signature_algorithms:
+ return self.cert.get_signature_algorithm()
+
+ def _validate_subject(self):
+ expected_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in self.subject]
+ cert_subject = self.cert.get_subject().get_components()
+ current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in cert_subject]
+ if not compare_sets(expected_subject, current_subject, self.subject_strict):
+ return expected_subject, current_subject
+
+ def _validate_issuer(self):
+ expected_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in self.issuer]
+ cert_issuer = self.cert.get_issuer().get_components()
+ current_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in cert_issuer]
+ if not compare_sets(expected_issuer, current_issuer, self.issuer_strict):
+ return self.issuer, cert_issuer
+
+ def _validate_has_expired(self):
+ # The following 3 lines are the same as the current PyOpenSSL code for cert.has_expired().
+ # Older versions of PyOpenSSL have a buggy implementation,
+ # to avoid issues with those we added the code from a more recent release here.
+
+ time_string = to_native(self.cert.get_notAfter())
+ not_after = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
+ cert_expired = not_after < datetime.datetime.utcnow()
+ return cert_expired
+
+ def _validate_version(self):
+ # Version numbers in certs are off by one:
+ # v1: 0, v2: 1, v3: 2 ...
+ return self.cert.get_version() + 1
+
+ def _validate_key_usage(self):
+ found = False
+ for extension_idx in range(0, self.cert.get_extension_count()):
+ extension = self.cert.get_extension(extension_idx)
+ if extension.get_short_name() == b'keyUsage':
+ found = True
+ expected_extension = crypto.X509Extension(b"keyUsage", False, b', '.join(self.key_usage))
+ key_usage = [usage.strip() for usage in to_text(expected_extension, errors='surrogate_or_strict').split(',')]
+ current_ku = [usage.strip() for usage in to_text(extension, errors='surrogate_or_strict').split(',')]
+ if not compare_sets(key_usage, current_ku, self.key_usage_strict):
+ return self.key_usage, str(extension).split(', ')
+ if not found:
+ # This is only bad if the user specified a non-empty list
+ if self.key_usage:
+ return NO_EXTENSION
+
+ def _validate_extended_key_usage(self):
+ found = False
+ for extension_idx in range(0, self.cert.get_extension_count()):
+ extension = self.cert.get_extension(extension_idx)
+ if extension.get_short_name() == b'extendedKeyUsage':
+ found = True
+ extKeyUsage = [OpenSSL._util.lib.OBJ_txt2nid(keyUsage) for keyUsage in self.extended_key_usage]
+ current_xku = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in
+ to_bytes(extension, errors='surrogate_or_strict').split(b',')]
+ if not compare_sets(extKeyUsage, current_xku, self.extended_key_usage_strict):
+ return self.extended_key_usage, str(extension).split(', ')
+ if not found:
+ # This is only bad if the user specified a non-empty list
+ if self.extended_key_usage:
+ return NO_EXTENSION
+
+ def _normalize_san(self, san):
+ # Apparently OpenSSL returns 'IP address' not 'IP' as specifier when converting the subjectAltName to string
+ # although it won't accept this specifier when generating the CSR. (https://github.com/openssl/openssl/issues/4004)
+ if san.startswith('IP Address:'):
+ san = 'IP:' + san[len('IP Address:'):]
+ if san.startswith('IP:'):
+ ip = compat_ipaddress.ip_address(san[3:])
+ san = 'IP:{0}'.format(ip.compressed)
+ return san
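+ # e.g. _normalize_san('IP Address:127.0.0.1') returns 'IP:127.0.0.1', and
+ # IPv6 addresses are reduced to their compressed form
+ # ('IP:0:0:0:0:0:0:0:1' becomes 'IP:::1').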
+
+ def _validate_subject_alt_name(self):
+ found = False
+ for extension_idx in range(0, self.cert.get_extension_count()):
+ extension = self.cert.get_extension(extension_idx)
+ if extension.get_short_name() == b'subjectAltName':
+ found = True
+ l_altnames = [self._normalize_san(altname.strip()) for altname in
+ to_text(extension, errors='surrogate_or_strict').split(', ')]
+ sans = [self._normalize_san(to_text(san, errors='surrogate_or_strict')) for san in self.subject_alt_name]
+ if not compare_sets(sans, l_altnames, self.subject_alt_name_strict):
+ return self.subject_alt_name, l_altnames
+ if not found:
+ # This is only bad if the user specified a non-empty list
+ if self.subject_alt_name:
+ return NO_EXTENSION
+
+ def _validate_not_before(self):
+ return self.cert.get_notBefore()
+
+ def _validate_not_after(self):
+ return self.cert.get_notAfter()
+
+ def _validate_valid_at(self):
+ rt = crypto_utils.get_relative_time_option(self.valid_at, "valid_at", backend=self.backend)
+ rt = to_bytes(rt, errors='surrogate_or_strict')
+ return self.cert.get_notBefore(), rt, self.cert.get_notAfter()
+
+ def _validate_invalid_at(self):
+ rt = crypto_utils.get_relative_time_option(self.invalid_at, "invalid_at", backend=self.backend)
+ rt = to_bytes(rt, errors='surrogate_or_strict')
+ return self.cert.get_notBefore(), rt, self.cert.get_notAfter()
+
+ def _validate_valid_in(self):
+ valid_in_asn1 = crypto_utils.get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
+ valid_in_date = to_bytes(valid_in_asn1, errors='surrogate_or_strict')
+ return self.cert.get_notBefore(), valid_in_date, self.cert.get_notAfter()
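+
+ # Note: the pyOpenSSL date checks above compare ASN.1 time byte strings
+ # (b'YYYYMMDDHHMMSSZ') directly; this is safe because the fixed-width
+ # format sorts lexicographically in chronological order.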
+
+
+class EntrustCertificate(Certificate):
+ """Retrieve a certificate using Entrust (ECS)."""
+
+ def __init__(self, module, backend):
+ super(EntrustCertificate, self).__init__(module, backend)
+ self.trackingId = None
+ self.notAfter = crypto_utils.get_relative_time_option(module.params['entrust_not_after'], 'entrust_not_after', backend=self.backend)
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file {0} does not exist'.format(self.csr_path)
+ )
+
+ self.csr = crypto_utils.load_certificate_request(
+ path=self.csr_path,
+ content=self.csr_content,
+ backend=self.backend,
+ )
+
+ # ECS API defaults to using the validated organization tied to the account.
+ # We want to always force behavior of trying to use the organization provided in the CSR.
+ # To that end we need to parse out the organization from the CSR.
+ self.csr_org = None
+ if self.backend == 'pyopenssl':
+ csr_subject = self.csr.get_subject()
+ csr_subject_components = csr_subject.get_components()
+ for k, v in csr_subject_components:
+ if k.upper() == 'O':
+ # Entrust does not support multiple validated organizations in a single certificate
+ if self.csr_org is not None:
+ module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
+ "Subject DN: '{0}'. ".format(csr_subject)))
+ else:
+ self.csr_org = v
+ elif self.backend == 'cryptography':
+ csr_subject_orgs = self.csr.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)
+ if len(csr_subject_orgs) == 1:
+ self.csr_org = csr_subject_orgs[0].value
+ elif len(csr_subject_orgs) > 1:
+ module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
+ "Subject DN: '{0}'. ".format(self.csr.subject)))
+ # If no organization in the CSR, explicitly tell ECS that it should be blank in issued cert, not defaulted to
+ # organization tied to the account.
+ if self.csr_org is None:
+ self.csr_org = ''
+
+ try:
+ self.ecs_client = ECSClient(
+ entrust_api_user=module.params.get('entrust_api_user'),
+ entrust_api_key=module.params.get('entrust_api_key'),
+ entrust_api_cert=module.params.get('entrust_api_client_cert_path'),
+ entrust_api_cert_key=module.params.get('entrust_api_client_cert_key_path'),
+ entrust_api_specification_path=module.params.get('entrust_api_specification_path')
+ )
+ except SessionConfigurationException as e:
+ module.fail_json(msg='Failed to initialize Entrust Provider: {0}'.format(to_native(e.message)))
+
+ def generate(self, module):
+
+ if not self.check(module, perms_required=False) or self.force:
+ # Read the CSR that was generated for us
+ body = {}
+ if self.csr_content is not None:
+ body['csr'] = self.csr_content
+ else:
+ with open(self.csr_path, 'r') as csr_file:
+ body['csr'] = csr_file.read()
+
+ body['certType'] = module.params['entrust_cert_type']
+
+ # Handle expiration (defaults to 365 days from now if not specified)
+ expiry = self.notAfter
+ if not expiry:
+ gmt_now = datetime.datetime.fromtimestamp(time.mktime(time.gmtime()))
+ expiry = gmt_now + datetime.timedelta(days=365)
+
+ expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
+ body['certExpiryDate'] = expiry_iso3339
+ body['org'] = self.csr_org
+ body['tracking'] = {
+ 'requesterName': module.params['entrust_requester_name'],
+ 'requesterEmail': module.params['entrust_requester_email'],
+ 'requesterPhone': module.params['entrust_requester_phone'],
+ }
+
+ try:
+ result = self.ecs_client.NewCertRequest(Body=body)
+ self.trackingId = result.get('trackingId')
+ except RestOperationException as e:
+ module.fail_json(msg='Failed to request new certificate from Entrust Certificate Services (ECS): {0}'.format(to_native(e.message)))
+
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ crypto_utils.write_file(module, to_bytes(result.get('endEntityCert')))
+ self.cert = crypto_utils.load_certificate(self.path, backend=self.backend)
+ self.changed = True
+
+ def check(self, module, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ parent_check = super(EntrustCertificate, self).check(module, perms_required)
+
+ try:
+ cert_details = self._get_cert_details()
+ except RestOperationException as e:
+ module.fail_json(msg='Failed to get status of existing certificate from Entrust Certificate Services (ECS): {0}.'.format(to_native(e.message)))
+
+ # Always issue a new certificate if the certificate is expired, suspended or revoked
+ status = cert_details.get('status', False)
+ if status in ('EXPIRED', 'SUSPENDED', 'REVOKED'):
+ return False
+
+ # If the requested cert type was specified and it is for a different certificate type than the initial certificate, a new one is needed
+ if module.params['entrust_cert_type'] and cert_details.get('certType') and module.params['entrust_cert_type'] != cert_details.get('certType'):
+ return False
+
+ return parent_check
+
+ def _get_cert_details(self):
+ cert_details = {}
+ if self.cert:
+ serial_number = None
+ expiry = None
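+ # Extract the serial number as an uppercase hex string; it is used below to look the certificate up in ECS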
+ if self.backend == 'pyopenssl':
+ serial_number = "{0:X}".format(self.cert.get_serial_number())
+ time_string = to_native(self.cert.get_notAfter())
+ expiry = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
+ elif self.backend == 'cryptography':
+ serial_number = "{0:X}".format(self.cert.serial_number)
+ expiry = self.cert.not_valid_after
+
+ # get some information about the expiry of this certificate
+ expiry_rfc3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
+ cert_details['expiresAfter'] = expiry_rfc3339
+
+ # If a trackingId is not already defined (from the result of a generate),
+ # use the serial number to look up the tracking ID
+ if self.trackingId is None and serial_number is not None:
+ cert_results = self.ecs_client.GetCertificates(serialNumber=serial_number).get('certificates', {})
+
+ # Finding 0 or more than 1 result is a very unlikely use case; it simply means we cannot perform additional checks
+ # on the 'state' as returned by Entrust Certificate Services (ECS). The general certificate validity is
+ # still checked as it is in the rest of the module.
+ if len(cert_results) == 1:
+ self.trackingId = cert_results[0].get('trackingId')
+
+ if self.trackingId is not None:
+ cert_details.update(self.ecs_client.GetCertificate(trackingId=self.trackingId))
+
+ return cert_details
+
+ def dump(self, check_mode=False):
+
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'csr': self.csr_path,
+ }
+
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ content = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ result['certificate'] = content.decode('utf-8') if content else None
+
+ result.update(self._get_cert_details())
+
+ return result
+
+
+class AcmeCertificate(Certificate):
+ """Retrieve a certificate using the ACME protocol."""
+
+ # The backend is only used by the 'self.check' function,
+ # so we just pass it through to the parent constructor
+
+ def __init__(self, module, backend):
+ super(AcmeCertificate, self).__init__(module, backend)
+ self.accountkey_path = module.params['acme_accountkey_path']
+ self.challenge_path = module.params['acme_challenge_path']
+ self.use_chain = module.params['acme_chain']
+ self.acme_directory = module.params['acme_directory']
+
+ def generate(self, module):
+
+ if self.csr_content is None and not os.path.exists(self.csr_path):
+ raise CertificateError(
+ 'The certificate signing request file %s does not exist' % self.csr_path
+ )
+
+ if not os.path.exists(self.accountkey_path):
+ raise CertificateError(
+ 'The account key %s does not exist' % self.accountkey_path
+ )
+
+ if not os.path.exists(self.challenge_path):
+ raise CertificateError(
+ 'The challenge path %s does not exist' % self.challenge_path
+ )
+
+ if not self.check(module, perms_required=False) or self.force:
+ acme_tiny_path = self.module.get_bin_path('acme-tiny', required=True)
+ command = [acme_tiny_path]
+ if self.use_chain:
+ command.append('--chain')
+ command.extend(['--account-key', self.accountkey_path])
+ if self.csr_content is not None:
+ # We need to temporarily write the CSR to disk
+ fd, tmpsrc = tempfile.mkstemp()
+ module.add_cleanup_file(tmpsrc) # Ansible will delete the file on exit
+ f = os.fdopen(fd, 'wb')
+ try:
+ f.write(self.csr_content)
+ except Exception as err:
+ try:
+ f.close()
+ except Exception as dummy:
+ pass
+ module.fail_json(
+ msg="failed to create temporary CSR file: %s" % to_native(err),
+ exception=traceback.format_exc()
+ )
+ f.close()
+ command.extend(['--csr', tmpsrc])
+ else:
+ command.extend(['--csr', self.csr_path])
+ command.extend(['--acme-dir', self.challenge_path])
+ command.extend(['--directory-url', self.acme_directory])
+
+ try:
+ crt = module.run_command(command, check_rc=True)[1]
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ crypto_utils.write_file(module, to_bytes(crt))
+ self.changed = True
+ except OSError as exc:
+ raise CertificateError(exc)
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ def dump(self, check_mode=False):
+
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'accountkey': self.accountkey_path,
+ 'csr': self.csr_path,
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ content = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ result['certificate'] = content.decode('utf-8') if content else None
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ path=dict(type='path', required=True),
+ provider=dict(type='str', choices=['acme', 'assertonly', 'entrust', 'ownca', 'selfsigned']),
+ force=dict(type='bool', default=False),
+ csr_path=dict(type='path'),
+ csr_content=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
+ return_content=dict(type='bool', default=False),
+
+ # General properties of a certificate
+ privatekey_path=dict(type='path'),
+ privatekey_content=dict(type='str'),
+ privatekey_passphrase=dict(type='str', no_log=True),
+
+ # provider: assertonly
+ signature_algorithms=dict(type='list', elements='str', removed_in_version='2.13'),
+ subject=dict(type='dict', removed_in_version='2.13'),
+ subject_strict=dict(type='bool', default=False, removed_in_version='2.13'),
+ issuer=dict(type='dict', removed_in_version='2.13'),
+ issuer_strict=dict(type='bool', default=False, removed_in_version='2.13'),
+ has_expired=dict(type='bool', default=False, removed_in_version='2.13'),
+ version=dict(type='int', removed_in_version='2.13'),
+ key_usage=dict(type='list', elements='str', aliases=['keyUsage'], removed_in_version='2.13'),
+ key_usage_strict=dict(type='bool', default=False, aliases=['keyUsage_strict'], removed_in_version='2.13'),
+ extended_key_usage=dict(type='list', elements='str', aliases=['extendedKeyUsage'], removed_in_version='2.13'),
+ extended_key_usage_strict=dict(type='bool', default=False, aliases=['extendedKeyUsage_strict'], removed_in_version='2.13'),
+ subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName'], removed_in_version='2.13'),
+ subject_alt_name_strict=dict(type='bool', default=False, aliases=['subjectAltName_strict'], removed_in_version='2.13'),
+ not_before=dict(type='str', aliases=['notBefore'], removed_in_version='2.13'),
+ not_after=dict(type='str', aliases=['notAfter'], removed_in_version='2.13'),
+ valid_at=dict(type='str', removed_in_version='2.13'),
+ invalid_at=dict(type='str', removed_in_version='2.13'),
+ valid_in=dict(type='str', removed_in_version='2.13'),
+
+ # provider: selfsigned
+ selfsigned_version=dict(type='int', default=3),
+ selfsigned_digest=dict(type='str', default='sha256'),
+ selfsigned_not_before=dict(type='str', default='+0s', aliases=['selfsigned_notBefore']),
+ selfsigned_not_after=dict(type='str', default='+3650d', aliases=['selfsigned_notAfter']),
+ selfsigned_create_subject_key_identifier=dict(
+ type='str',
+ default='create_if_not_provided',
+ choices=['create_if_not_provided', 'always_create', 'never_create']
+ ),
+
+ # provider: ownca
+ ownca_path=dict(type='path'),
+ ownca_content=dict(type='str'),
+ ownca_privatekey_path=dict(type='path'),
+ ownca_privatekey_content=dict(type='str'),
+ ownca_privatekey_passphrase=dict(type='str', no_log=True),
+ ownca_digest=dict(type='str', default='sha256'),
+ ownca_version=dict(type='int', default=3),
+ ownca_not_before=dict(type='str', default='+0s'),
+ ownca_not_after=dict(type='str', default='+3650d'),
+ ownca_create_subject_key_identifier=dict(
+ type='str',
+ default='create_if_not_provided',
+ choices=['create_if_not_provided', 'always_create', 'never_create']
+ ),
+ ownca_create_authority_key_identifier=dict(type='bool', default=True),
+
+ # provider: acme
+ acme_accountkey_path=dict(type='path'),
+ acme_challenge_path=dict(type='path'),
+ acme_chain=dict(type='bool', default=False),
+ acme_directory=dict(type='str', default="https://acme-v02.api.letsencrypt.org/directory"),
+
+ # provider: entrust
+ entrust_cert_type=dict(type='str', default='STANDARD_SSL',
+ choices=['STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL',
+ 'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT']),
+ entrust_requester_email=dict(type='str'),
+ entrust_requester_name=dict(type='str'),
+ entrust_requester_phone=dict(type='str'),
+ entrust_api_user=dict(type='str'),
+ entrust_api_key=dict(type='str', no_log=True),
+ entrust_api_client_cert_path=dict(type='path'),
+ entrust_api_client_cert_key_path=dict(type='path', no_log=True),
+ entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'),
+ entrust_not_after=dict(type='str', default='+365d'),
+ ),
+ supports_check_mode=True,
+ add_file_common_args=True,
+ required_if=[
+ ['state', 'present', ['provider']],
+ ['provider', 'entrust', ['entrust_requester_email', 'entrust_requester_name', 'entrust_requester_phone',
+ 'entrust_api_user', 'entrust_api_key', 'entrust_api_client_cert_path',
+ 'entrust_api_client_cert_key_path']],
+ ],
+ mutually_exclusive=[
+ ['csr_path', 'csr_content'],
+ ['privatekey_path', 'privatekey_content'],
+ ['ownca_path', 'ownca_content'],
+ ['ownca_privatekey_path', 'ownca_privatekey_content'],
+ ],
+ )
+
+ try:
+ if module.params['state'] == 'absent':
+ certificate = CertificateAbsent(module)
+
+ else:
+ if module.params['provider'] != 'assertonly' and module.params['csr_path'] is None and module.params['csr_content'] is None:
+ module.fail_json(msg='csr_path or csr_content is required when provider is not assertonly')
+
+ base_dir = os.path.dirname(module.params['path']) or '.'
+ if not os.path.isdir(base_dir):
+ module.fail_json(
+ name=base_dir,
+ msg='The directory %s does not exist or is not a directory' % base_dir
+ )
+
+ provider = module.params['provider']
+ if provider == 'assertonly':
+ module.deprecate("The 'assertonly' provider is deprecated; please see the examples of "
+ "the 'openssl_certificate' module on how to replace it with other modules",
+ version='2.13')
+ elif provider == 'selfsigned':
+ if module.params['privatekey_path'] is None and module.params['privatekey_content'] is None:
+ module.fail_json(msg='One of privatekey_path and privatekey_content must be specified for the selfsigned provider.')
+ elif provider == 'acme':
+ if module.params['acme_accountkey_path'] is None:
+ module.fail_json(msg='The acme_accountkey_path option must be specified for the acme provider.')
+ if module.params['acme_challenge_path'] is None:
+ module.fail_json(msg='The acme_challenge_path option must be specified for the acme provider.')
+ elif provider == 'ownca':
+ if module.params['ownca_path'] is None and module.params['ownca_content'] is None:
+ module.fail_json(msg='One of ownca_path and ownca_content must be specified for the ownca provider.')
+ if module.params['ownca_privatekey_path'] is None and module.params['ownca_privatekey_content'] is None:
+ module.fail_json(msg='One of ownca_privatekey_path and ownca_privatekey_content must be specified for the ownca provider.')
+
+ backend = module.params['select_crypto_backend']
+ if backend == 'auto':
+ # Detect what backend we can use
+ can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
+ can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
+
+ # If cryptography is available we'll use it
+ if can_use_cryptography:
+ backend = 'cryptography'
+ elif can_use_pyopenssl:
+ backend = 'pyopenssl'
+
+ if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
+ module.warn('crypto backend forced to pyopenssl. The cryptography library does not support v2 certificates')
+ backend = 'pyopenssl'
+
+ # Fail if no backend has been found
+ if backend == 'auto':
+ module.fail_json(msg=("Can't detect any of the required Python libraries "
+ "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
+ MINIMAL_CRYPTOGRAPHY_VERSION,
+ MINIMAL_PYOPENSSL_VERSION))
+
+ if backend == 'pyopenssl':
+ if not PYOPENSSL_FOUND:
+ module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
+ exception=PYOPENSSL_IMP_ERR)
+ if module.params['provider'] in ['selfsigned', 'ownca', 'assertonly']:
+ try:
+ getattr(crypto.X509Req, 'get_extensions')
+ except AttributeError:
+ module.fail_json(msg='You need to have PyOpenSSL>=0.15')
+
+ module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated', version='2.13')
+ if provider == 'selfsigned':
+ certificate = SelfSignedCertificate(module)
+ elif provider == 'acme':
+ certificate = AcmeCertificate(module, 'pyopenssl')
+ elif provider == 'ownca':
+ certificate = OwnCACertificate(module)
+ elif provider == 'entrust':
+ certificate = EntrustCertificate(module, 'pyopenssl')
+ else:
+ certificate = AssertOnlyCertificate(module)
+ elif backend == 'cryptography':
+ if not CRYPTOGRAPHY_FOUND:
+ module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
+ exception=CRYPTOGRAPHY_IMP_ERR)
+ if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
+ module.fail_json(msg='The cryptography backend does not support v2 certificates, '
+ 'use select_crypto_backend=pyopenssl for v2 certificates')
+ if provider == 'selfsigned':
+ certificate = SelfSignedCertificateCryptography(module)
+ elif provider == 'acme':
+ certificate = AcmeCertificate(module, 'cryptography')
+ elif provider == 'ownca':
+ certificate = OwnCACertificateCryptography(module)
+ elif provider == 'entrust':
+ certificate = EntrustCertificate(module, 'cryptography')
+ else:
+ certificate = AssertOnlyCertificateCryptography(module)
+
+ if module.params['state'] == 'present':
+ if module.check_mode:
+ result = certificate.dump(check_mode=True)
+ result['changed'] = module.params['force'] or not certificate.check(module)
+ module.exit_json(**result)
+
+ certificate.generate(module)
+ else:
+ if module.check_mode:
+ result = certificate.dump(check_mode=True)
+ result['changed'] = os.path.exists(module.params['path'])
+ module.exit_json(**result)
+
+ certificate.remove(module)
+
+ result = certificate.dump()
+ module.exit_json(**result)
+ except crypto_utils.OpenSSLObjectError as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/openssl_certificate_info.py b/test/support/integration/plugins/modules/openssl_certificate_info.py
new file mode 100644
index 0000000000..2d7459ae9d
--- /dev/null
+++ b/test/support/integration/plugins/modules/openssl_certificate_info.py
@@ -0,0 +1,863 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
+# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: openssl_certificate_info
+version_added: '2.8'
+short_description: Provide information on OpenSSL X.509 certificates
+description:
+ - This module allows one to query information on OpenSSL certificates.
+ - It uses the pyOpenSSL or cryptography python library to interact with OpenSSL. If both the
+ cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
+ cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with
+ C(select_crypto_backend)). Please note that the PyOpenSSL backend was deprecated in Ansible 2.9
+ and will be removed in Ansible 2.13.
+requirements:
+ - PyOpenSSL >= 0.15 or cryptography >= 1.6
+author:
+ - Felix Fontein (@felixfontein)
+ - Yanis Guenane (@Spredzy)
+ - Markus Teufelberger (@MarkusTeufelberger)
+options:
+ path:
+ description:
+ - Remote absolute path where the certificate file is loaded from.
+ - Either I(path) or I(content) must be specified, but not both.
+ type: path
+ content:
+ description:
+ - Content of the X.509 certificate in PEM format.
+ - Either I(path) or I(content) must be specified, but not both.
+ type: str
+ version_added: "2.10"
+ valid_at:
+ description:
+ - A dict of names mapping to time specifications. For every time specified here,
+ it will be checked whether the certificate is valid at that point. See the
+ C(valid_at) return value for information on the result.
+ - Time can be specified either as relative time or as an absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)), and ASN.1 TIME (i.e. pattern C(YYYYMMDDHHMMSSZ)).
+ Note that all timestamps will be treated as being in UTC.
+ type: dict
+ select_crypto_backend:
+ description:
+ - Determines which crypto backend to use.
+ - The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
+ - If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
+ - If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
+ - Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in Ansible 2.13.
+ From that point on, only the C(cryptography) backend will be available.
+ type: str
+ default: auto
+ choices: [ auto, cryptography, pyopenssl ]
+
+notes:
+ - All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
+ They are all in UTC.
+seealso:
+- module: openssl_certificate
+'''
+
+EXAMPLES = r'''
+- name: Generate a Self Signed OpenSSL certificate
+ openssl_certificate:
+ path: /etc/ssl/crt/ansible.com.crt
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ csr_path: /etc/ssl/csr/ansible.com.csr
+ provider: selfsigned
+
+
+# Get information on the certificate
+
+- name: Get information on generated certificate
+ openssl_certificate_info:
+ path: /etc/ssl/crt/ansible.com.crt
+ register: result
+
+- name: Dump information
+ debug:
+ var: result
+
+
+# Check whether the certificate is valid or not valid at certain times, fail
+# if this is not the case. The first task (openssl_certificate_info) collects
+# the information, and the second task (assert) validates the result and
+# makes the playbook fail in case something is not as expected.
+
+- name: Test whether that certificate is valid tomorrow and/or in three weeks
+ openssl_certificate_info:
+ path: /etc/ssl/crt/ansible.com.crt
+ valid_at:
+ point_1: "+1d"
+ point_2: "+3w"
+ register: result
+
+- name: Validate that certificate is valid tomorrow, but not in three weeks
+ assert:
+ that:
+ - result.valid_at.point_1 # valid in one day
+ - not result.valid_at.point_2 # not valid in three weeks
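+
+
+# The following tasks are illustrative sketches, not part of the original
+# examples: the first checks validity at an absolute ASN.1 TIME timestamp,
+# the second reads the certificate via the I(content) option instead of I(path).
+
+- name: Test whether the certificate was valid at a fixed point in time
+ openssl_certificate_info:
+ path: /etc/ssl/crt/ansible.com.crt
+ valid_at:
+ fixed_ts: "20200101000000Z"
+ register: result
+
+- name: Get information on a certificate passed as inline content
+ openssl_certificate_info:
+ content: "{{ lookup('file', '/etc/ssl/crt/ansible.com.crt') }}"
+ register: result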
+'''
+
+RETURN = r'''
+expired:
+ description: Whether the certificate is expired (i.e. C(notAfter) is in the past)
+ returned: success
+ type: bool
+basic_constraints:
+ description: Entries in the C(basic_constraints) extension, or C(none) if extension is not present.
+ returned: success
+ type: list
+ elements: str
+ sample: "[CA:TRUE, pathlen:1]"
+basic_constraints_critical:
+ description: Whether the C(basic_constraints) extension is critical.
+ returned: success
+ type: bool
+extended_key_usage:
+ description: Entries in the C(extended_key_usage) extension, or C(none) if extension is not present.
+ returned: success
+ type: list
+ elements: str
+ sample: "[Biometric Info, DVCS, Time Stamping]"
+extended_key_usage_critical:
+ description: Whether the C(extended_key_usage) extension is critical.
+ returned: success
+ type: bool
+extensions_by_oid:
+ description: Returns a dictionary for every extension OID
+ returned: success
+ type: dict
+ contains:
+ critical:
+ description: Whether the extension is critical.
+ returned: success
+ type: bool
+ value:
+ description: The Base64 encoded value (in DER format) of the extension
+ returned: success
+ type: str
+ sample: "MAMCAQU="
+ sample: '{"1.3.6.1.5.5.7.1.24": { "critical": false, "value": "MAMCAQU="}}'
+key_usage:
+ description: Entries in the C(key_usage) extension, or C(none) if extension is not present.
+ returned: success
+ type: list
+ elements: str
+ sample: "[Key Agreement, Data Encipherment]"
+key_usage_critical:
+ description: Whether the C(key_usage) extension is critical.
+ returned: success
+ type: bool
+subject_alt_name:
+ description: Entries in the C(subject_alt_name) extension, or C(none) if extension is not present.
+ returned: success
+ type: list
+ elements: str
+ sample: "[DNS:www.ansible.com, IP:1.2.3.4]"
+subject_alt_name_critical:
+ description: Whether the C(subject_alt_name) extension is critical.
+ returned: success
+ type: bool
+ocsp_must_staple:
+ description: C(yes) if the OCSP Must Staple extension is present, C(none) otherwise.
+ returned: success
+ type: bool
+ocsp_must_staple_critical:
+ description: Whether the C(ocsp_must_staple) extension is critical.
+ returned: success
+ type: bool
+issuer:
+ description:
+ - The certificate's issuer.
+ - Note that for repeated values, only the last one will be returned.
+ returned: success
+ type: dict
+ sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+ description: The certificate's issuer as an ordered list of tuples.
+ returned: success
+ type: list
+ elements: list
+ sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
+ version_added: "2.9"
+subject:
+ description:
+ - The certificate's subject as a dictionary.
+ - Note that for repeated values, only the last one will be returned.
+ returned: success
+ type: dict
+ sample: '{"commonName": "www.example.com", "emailAddress": "test@example.com"}'
+subject_ordered:
+ description: The certificate's subject as an ordered list of tuples.
+ returned: success
+ type: list
+ elements: list
+ sample: '[["commonName", "www.example.com"], ["emailAddress": "test@example.com"]]'
+ version_added: "2.9"
+not_after:
+ description: C(notAfter) date as ASN.1 TIME
+ returned: success
+ type: str
+ sample: 20190413202428Z
+not_before:
+ description: C(notBefore) date as ASN.1 TIME
+ returned: success
+ type: str
+ sample: 20190331202428Z
+public_key:
+ description: Certificate's public key in PEM format
+ returned: success
+ type: str
+ sample: "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A..."
+public_key_fingerprints:
+ description:
+ - Fingerprints of certificate's public key.
+ - For every hash algorithm available, the fingerprint is computed.
+ returned: success
+ type: dict
+ sample: "{'sha256': 'd4:b3:aa:6d:c8:04:ce:4e:ba:f6:29:4d:92:a3:94:b0:c2:ff:bd:bf:33:63:11:43:34:0f:51:b0:95:09:2f:63',
+ 'sha512': 'f7:07:4a:f0:b0:f0:e6:8b:95:5f:f9:e6:61:0a:32:68:f1..."
+signature_algorithm:
+ description: The signature algorithm used to sign the certificate.
+ returned: success
+ type: str
+ sample: sha256WithRSAEncryption
+serial_number:
+ description: The certificate's serial number.
+ returned: success
+ type: int
+ sample: 1234
+version:
+ description: The certificate version.
+ returned: success
+ type: int
+ sample: 3
+valid_at:
+ description: For every time stamp provided in the I(valid_at) option, a
+ boolean whether the certificate is valid at that point in time
+ or not.
+ returned: success
+ type: dict
+subject_key_identifier:
+ description:
+ - The certificate's subject key identifier.
+ - The identifier is returned in hexadecimal, with C(:) used to separate bytes.
+ - Is C(none) if the C(SubjectKeyIdentifier) extension is not present.
+ returned: success and if the pyOpenSSL backend is I(not) used
+ type: str
+ sample: '00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33'
+ version_added: "2.9"
+authority_key_identifier:
+ description:
+ - The certificate's authority key identifier.
+ - The identifier is returned in hexadecimal, with C(:) used to separate bytes.
+ - Is C(none) if the C(AuthorityKeyIdentifier) extension is not present.
+ returned: success and if the pyOpenSSL backend is I(not) used
+ type: str
+ sample: '00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33'
+ version_added: "2.9"
+authority_cert_issuer:
+ description:
+ - The certificate's authority cert issuer as a list of general names.
+ - Is C(none) if the C(AuthorityKeyIdentifier) extension is not present.
+ returned: success and if the pyOpenSSL backend is I(not) used
+ type: list
+ elements: str
+ sample: "[DNS:www.ansible.com, IP:1.2.3.4]"
+ version_added: "2.9"
+authority_cert_serial_number:
+ description:
+ - The certificate's authority cert serial number.
+ - Is C(none) if the C(AuthorityKeyIdentifier) extension is not present.
+ returned: success and if the pyOpenSSL backend is I(not) used
+ type: int
+ sample: 12345
+ version_added: "2.9"
+ocsp_uri:
+ description: The OCSP responder URI, if included in the certificate. Will be
+ C(none) if no OCSP responder URI is included.
+ returned: success
+ type: str
+ version_added: "2.9"
+'''
+
+
+import abc
+import binascii
+import datetime
+import os
+import re
+import traceback
+from distutils.version import LooseVersion
+
+from ansible.module_utils import crypto as crypto_utils
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text, to_bytes
+from ansible.module_utils.compat import ipaddress as compat_ipaddress
+
+MINIMAL_CRYPTOGRAPHY_VERSION = '1.6'
+MINIMAL_PYOPENSSL_VERSION = '0.15'
+
+PYOPENSSL_IMP_ERR = None
+try:
+ import OpenSSL
+ from OpenSSL import crypto
+ PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
+ if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
+ # OpenSSL 1.1.0 or newer
+ OPENSSL_MUST_STAPLE_NAME = b"tlsfeature"
+ OPENSSL_MUST_STAPLE_VALUE = b"status_request"
+ else:
+ # OpenSSL 1.0.x or older
+ OPENSSL_MUST_STAPLE_NAME = b"1.3.6.1.5.5.7.1.24"
+ OPENSSL_MUST_STAPLE_VALUE = b"DER:30:03:02:01:05"
+except ImportError:
+ PYOPENSSL_IMP_ERR = traceback.format_exc()
+ PYOPENSSL_FOUND = False
+else:
+ PYOPENSSL_FOUND = True
+
+CRYPTOGRAPHY_IMP_ERR = None
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.primitives import serialization
+ CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
+except ImportError:
+ CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
+ CRYPTOGRAPHY_FOUND = False
+else:
+ CRYPTOGRAPHY_FOUND = True
+
+
+TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
+
+
+class CertificateInfo(crypto_utils.OpenSSLObject):
+ def __init__(self, module, backend):
+ super(CertificateInfo, self).__init__(
+ module.params['path'] or '',
+ 'present',
+ False,
+ module.check_mode,
+ )
+ self.backend = backend
+ self.module = module
+ self.content = module.params['content']
+ if self.content is not None:
+ self.content = self.content.encode('utf-8')
+
+ self.valid_at = module.params['valid_at']
+ if self.valid_at:
+ for k, v in self.valid_at.items():
+ if not isinstance(v, string_types):
+ self.module.fail_json(
+ msg='The value for valid_at.{0} must be of type string (got {1})'.format(k, type(v))
+ )
+ self.valid_at[k] = crypto_utils.get_relative_time_option(v, 'valid_at.{0}'.format(k))
+
+ def generate(self):
+ # Empty method because crypto_utils.OpenSSLObject wants this
+ pass
+
+ def dump(self):
+ # Empty method because crypto_utils.OpenSSLObject wants this
+ pass
+
+ @abc.abstractmethod
+ def _get_signature_algorithm(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_subject_ordered(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_issuer_ordered(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_version(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_key_usage(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_extended_key_usage(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_basic_constraints(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_ocsp_must_staple(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_subject_alt_name(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_not_before(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_not_after(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_public_key(self, binary):
+ pass
+
+ @abc.abstractmethod
+ def _get_subject_key_identifier(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_authority_key_identifier(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_serial_number(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_all_extensions(self):
+ pass
+
+ @abc.abstractmethod
+ def _get_ocsp_uri(self):
+ pass
+
+ def get_info(self):
+ result = dict()
+ self.cert = crypto_utils.load_certificate(self.path, content=self.content, backend=self.backend)
+
+ result['signature_algorithm'] = self._get_signature_algorithm()
+ subject = self._get_subject_ordered()
+ issuer = self._get_issuer_ordered()
+ result['subject'] = dict()
+ for k, v in subject:
+ result['subject'][k] = v
+ result['subject_ordered'] = subject
+ result['issuer'] = dict()
+ for k, v in issuer:
+ result['issuer'][k] = v
+ result['issuer_ordered'] = issuer
+ result['version'] = self._get_version()
+ result['key_usage'], result['key_usage_critical'] = self._get_key_usage()
+ result['extended_key_usage'], result['extended_key_usage_critical'] = self._get_extended_key_usage()
+ result['basic_constraints'], result['basic_constraints_critical'] = self._get_basic_constraints()
+ result['ocsp_must_staple'], result['ocsp_must_staple_critical'] = self._get_ocsp_must_staple()
+ result['subject_alt_name'], result['subject_alt_name_critical'] = self._get_subject_alt_name()
+
+ not_before = self._get_not_before()
+ not_after = self._get_not_after()
+ result['not_before'] = not_before.strftime(TIMESTAMP_FORMAT)
+ result['not_after'] = not_after.strftime(TIMESTAMP_FORMAT)
+ result['expired'] = not_after < datetime.datetime.utcnow()
+
+ result['valid_at'] = dict()
+ if self.valid_at:
+ for k, v in self.valid_at.items():
+ result['valid_at'][k] = not_before <= v <= not_after
+
+ result['public_key'] = self._get_public_key(binary=False)
+ pk = self._get_public_key(binary=True)
+ result['public_key_fingerprints'] = crypto_utils.get_fingerprint_of_bytes(pk) if pk is not None else dict()
+
+ if self.backend != 'pyopenssl':
+ ski = self._get_subject_key_identifier()
+ if ski is not None:
+ ski = to_native(binascii.hexlify(ski))
+ ski = ':'.join([ski[i:i + 2] for i in range(0, len(ski), 2)])
+ result['subject_key_identifier'] = ski
+
+ aki, aci, acsn = self._get_authority_key_identifier()
+ if aki is not None:
+ aki = to_native(binascii.hexlify(aki))
+ aki = ':'.join([aki[i:i + 2] for i in range(0, len(aki), 2)])
+ result['authority_key_identifier'] = aki
+ result['authority_cert_issuer'] = aci
+ result['authority_cert_serial_number'] = acsn
+
+ result['serial_number'] = self._get_serial_number()
+ result['extensions_by_oid'] = self._get_all_extensions()
+ result['ocsp_uri'] = self._get_ocsp_uri()
+
+ return result
+
+
+class CertificateInfoCryptography(CertificateInfo):
+ """Validate the supplied cert, using the cryptography backend"""
+ def __init__(self, module):
+ super(CertificateInfoCryptography, self).__init__(module, 'cryptography')
+
+ def _get_signature_algorithm(self):
+ return crypto_utils.cryptography_oid_to_name(self.cert.signature_algorithm_oid)
+
+ def _get_subject_ordered(self):
+ result = []
+ for attribute in self.cert.subject:
+ result.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
+ return result
+
+ def _get_issuer_ordered(self):
+ result = []
+ for attribute in self.cert.issuer:
+ result.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
+ return result
+
+ def _get_version(self):
+ if self.cert.version == x509.Version.v1:
+ return 1
+ if self.cert.version == x509.Version.v3:
+ return 3
+ return "unknown"
+
+ def _get_key_usage(self):
+ try:
+ current_key_ext = self.cert.extensions.get_extension_for_class(x509.KeyUsage)
+ current_key_usage = current_key_ext.value
+ key_usage = dict(
+ digital_signature=current_key_usage.digital_signature,
+ content_commitment=current_key_usage.content_commitment,
+ key_encipherment=current_key_usage.key_encipherment,
+ data_encipherment=current_key_usage.data_encipherment,
+ key_agreement=current_key_usage.key_agreement,
+ key_cert_sign=current_key_usage.key_cert_sign,
+ crl_sign=current_key_usage.crl_sign,
+ encipher_only=False,
+ decipher_only=False,
+ )
+ if key_usage['key_agreement']:
+ key_usage.update(dict(
+ encipher_only=current_key_usage.encipher_only,
+ decipher_only=current_key_usage.decipher_only
+ ))
+
+ key_usage_names = dict(
+ digital_signature='Digital Signature',
+ content_commitment='Non Repudiation',
+ key_encipherment='Key Encipherment',
+ data_encipherment='Data Encipherment',
+ key_agreement='Key Agreement',
+ key_cert_sign='Certificate Sign',
+ crl_sign='CRL Sign',
+ encipher_only='Encipher Only',
+ decipher_only='Decipher Only',
+ )
+ return sorted([
+ key_usage_names[name] for name, value in key_usage.items() if value
+ ]), current_key_ext.critical
+ except cryptography.x509.ExtensionNotFound:
+ return None, False
+
+ def _get_extended_key_usage(self):
+ try:
+ ext_keyusage_ext = self.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage)
+ return sorted([
+ crypto_utils.cryptography_oid_to_name(eku) for eku in ext_keyusage_ext.value
+ ]), ext_keyusage_ext.critical
+ except cryptography.x509.ExtensionNotFound:
+ return None, False
+
+ def _get_basic_constraints(self):
+ try:
+ ext_keyusage_ext = self.cert.extensions.get_extension_for_class(x509.BasicConstraints)
+ result = []
+ result.append('CA:{0}'.format('TRUE' if ext_keyusage_ext.value.ca else 'FALSE'))
+ if ext_keyusage_ext.value.path_length is not None:
+ result.append('pathlen:{0}'.format(ext_keyusage_ext.value.path_length))
+ return sorted(result), ext_keyusage_ext.critical
+ except cryptography.x509.ExtensionNotFound:
+ return None, False
+
+ def _get_ocsp_must_staple(self):
+ try:
+ try:
+ # This only works with cryptography >= 2.1
+ tlsfeature_ext = self.cert.extensions.get_extension_for_class(x509.TLSFeature)
+ value = cryptography.x509.TLSFeatureType.status_request in tlsfeature_ext.value
+ except AttributeError as dummy:
+ # Fallback for cryptography < 2.1
+ oid = x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.1.24")
+ tlsfeature_ext = self.cert.extensions.get_extension_for_oid(oid)
+ value = tlsfeature_ext.value.value == b"\x30\x03\x02\x01\x05"
+ return value, tlsfeature_ext.critical
+ except cryptography.x509.ExtensionNotFound:
+ return None, False
+
+ def _get_subject_alt_name(self):
+ try:
+ san_ext = self.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
+ result = [crypto_utils.cryptography_decode_name(san) for san in san_ext.value]
+ return result, san_ext.critical
+ except cryptography.x509.ExtensionNotFound:
+ return None, False
+
+ def _get_not_before(self):
+ return self.cert.not_valid_before
+
+ def _get_not_after(self):
+ return self.cert.not_valid_after
+
+ def _get_public_key(self, binary):
+ return self.cert.public_key().public_bytes(
+ serialization.Encoding.DER if binary else serialization.Encoding.PEM,
+ serialization.PublicFormat.SubjectPublicKeyInfo
+ )
+
+ def _get_subject_key_identifier(self):
+ try:
+ ext = self.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
+ return ext.value.digest
+ except cryptography.x509.ExtensionNotFound:
+ return None
+
+ def _get_authority_key_identifier(self):
+ try:
+ ext = self.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier)
+ issuer = None
+ if ext.value.authority_cert_issuer is not None:
+ issuer = [crypto_utils.cryptography_decode_name(san) for san in ext.value.authority_cert_issuer]
+ return ext.value.key_identifier, issuer, ext.value.authority_cert_serial_number
+ except cryptography.x509.ExtensionNotFound:
+ return None, None, None
+
+ def _get_serial_number(self):
+ return self.cert.serial_number
+
+ def _get_all_extensions(self):
+ return crypto_utils.cryptography_get_extensions_from_cert(self.cert)
+
+ def _get_ocsp_uri(self):
+ try:
+ ext = self.cert.extensions.get_extension_for_class(x509.AuthorityInformationAccess)
+ for desc in ext.value:
+ if desc.access_method == x509.oid.AuthorityInformationAccessOID.OCSP:
+ if isinstance(desc.access_location, x509.UniformResourceIdentifier):
+ return desc.access_location.value
+ except x509.ExtensionNotFound as dummy:
+ pass
+ return None
+
+
+class CertificateInfoPyOpenSSL(CertificateInfo):
+ """validate the supplied certificate."""
+
+ def __init__(self, module):
+ super(CertificateInfoPyOpenSSL, self).__init__(module, 'pyopenssl')
+
+ def _get_signature_algorithm(self):
+ return to_text(self.cert.get_signature_algorithm())
+
+ def __get_name(self, name):
+ result = []
+ for sub in name.get_components():
+ result.append([crypto_utils.pyopenssl_normalize_name(sub[0]), to_text(sub[1])])
+ return result
+
+ def _get_subject_ordered(self):
+ return self.__get_name(self.cert.get_subject())
+
+ def _get_issuer_ordered(self):
+ return self.__get_name(self.cert.get_issuer())
+
+ def _get_version(self):
+ # Version numbers in certs are off by one:
+ # v1: 0, v2: 1, v3: 2 ...
+ return self.cert.get_version() + 1
+
+ def _get_extension(self, short_name):
+ for extension_idx in range(0, self.cert.get_extension_count()):
+ extension = self.cert.get_extension(extension_idx)
+ if extension.get_short_name() == short_name:
+ result = [
+ crypto_utils.pyopenssl_normalize_name(usage.strip()) for usage in to_text(extension, errors='surrogate_or_strict').split(',')
+ ]
+ return sorted(result), bool(extension.get_critical())
+ return None, False
+
+ def _get_key_usage(self):
+ return self._get_extension(b'keyUsage')
+
+ def _get_extended_key_usage(self):
+ return self._get_extension(b'extendedKeyUsage')
+
+ def _get_basic_constraints(self):
+ return self._get_extension(b'basicConstraints')
+
+ def _get_ocsp_must_staple(self):
+ extensions = [self.cert.get_extension(i) for i in range(0, self.cert.get_extension_count())]
+ oms_ext = [
+ ext for ext in extensions
+ if to_bytes(ext.get_short_name()) == OPENSSL_MUST_STAPLE_NAME and to_bytes(ext) == OPENSSL_MUST_STAPLE_VALUE
+ ]
+ if OpenSSL.SSL.OPENSSL_VERSION_NUMBER < 0x10100000:
+ # Older versions of libssl don't know about OCSP Must Staple
+ oms_ext.extend([ext for ext in extensions if ext.get_short_name() == b'UNDEF' and ext.get_data() == b'\x30\x03\x02\x01\x05'])
+ if oms_ext:
+ return True, bool(oms_ext[0].get_critical())
+ else:
+ return None, False
+
+ def _normalize_san(self, san):
+ if san.startswith('IP Address:'):
+ san = 'IP:' + san[len('IP Address:'):]
+ if san.startswith('IP:'):
+ ip = compat_ipaddress.ip_address(san[3:])
+ san = 'IP:{0}'.format(ip.compressed)
+ return san
+
+ def _get_subject_alt_name(self):
+ for extension_idx in range(0, self.cert.get_extension_count()):
+ extension = self.cert.get_extension(extension_idx)
+ if extension.get_short_name() == b'subjectAltName':
+ result = [self._normalize_san(altname.strip()) for altname in
+ to_text(extension, errors='surrogate_or_strict').split(', ')]
+ return result, bool(extension.get_critical())
+ return None, False
+
+ def _get_not_before(self):
+ time_string = to_native(self.cert.get_notBefore())
+ return datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
+
+ def _get_not_after(self):
+ time_string = to_native(self.cert.get_notAfter())
+ return datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
+
+ def _get_public_key(self, binary):
+ try:
+ return crypto.dump_publickey(
+ crypto.FILETYPE_ASN1 if binary else crypto.FILETYPE_PEM,
+ self.cert.get_pubkey()
+ )
+ except AttributeError:
+ try:
+ # pyOpenSSL < 16.0:
+ bio = crypto._new_mem_buf()
+ if binary:
+ rc = crypto._lib.i2d_PUBKEY_bio(bio, self.cert.get_pubkey()._pkey)
+ else:
+ rc = crypto._lib.PEM_write_bio_PUBKEY(bio, self.cert.get_pubkey()._pkey)
+ if rc != 1:
+ crypto._raise_current_error()
+ return crypto._bio_to_string(bio)
+ except AttributeError:
+ self.module.warn('Your pyOpenSSL version does not support dumping public keys. '
+ 'Please upgrade to version 16.0 or newer, or use the cryptography backend.')
+
+ def _get_subject_key_identifier(self):
+ # Won't be implemented
+ return None
+
+ def _get_authority_key_identifier(self):
+ # Won't be implemented
+ return None, None, None
+
+ def _get_serial_number(self):
+ return self.cert.get_serial_number()
+
+ def _get_all_extensions(self):
+ return crypto_utils.pyopenssl_get_extensions_from_cert(self.cert)
+
+ def _get_ocsp_uri(self):
+ for i in range(self.cert.get_extension_count()):
+ ext = self.cert.get_extension(i)
+ if ext.get_short_name() == b'authorityInfoAccess':
+ v = str(ext)
+ m = re.search('^OCSP - URI:(.*)$', v, flags=re.MULTILINE)
+ if m:
+ return m.group(1)
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path'),
+ content=dict(type='str'),
+ valid_at=dict(type='dict'),
+ select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
+ ),
+ required_one_of=(
+ ['path', 'content'],
+ ),
+ mutually_exclusive=(
+ ['path', 'content'],
+ ),
+ supports_check_mode=True,
+ )
+
+ try:
+ if module.params['path'] is not None:
+ base_dir = os.path.dirname(module.params['path']) or '.'
+ if not os.path.isdir(base_dir):
+ module.fail_json(
+ name=base_dir,
+ msg='The directory %s does not exist or is not a directory' % base_dir
+ )
+
+ backend = module.params['select_crypto_backend']
+ if backend == 'auto':
+ # Detect what backend we can use
+ can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
+ can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
+
+ # If cryptography is available we'll use it
+ if can_use_cryptography:
+ backend = 'cryptography'
+ elif can_use_pyopenssl:
+ backend = 'pyopenssl'
+
+ # Fail if no backend has been found
+ if backend == 'auto':
+ module.fail_json(msg=("Can't detect any of the required Python libraries "
+ "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
+ MINIMAL_CRYPTOGRAPHY_VERSION,
+ MINIMAL_PYOPENSSL_VERSION))
+
+ if backend == 'pyopenssl':
+ if not PYOPENSSL_FOUND:
+ module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
+ exception=PYOPENSSL_IMP_ERR)
+ try:
+ getattr(crypto.X509Req, 'get_extensions')
+ except AttributeError:
+ module.fail_json(msg='You need to have PyOpenSSL>=0.15')
+
+ module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated', version='2.13')
+ certificate = CertificateInfoPyOpenSSL(module)
+ elif backend == 'cryptography':
+ if not CRYPTOGRAPHY_FOUND:
+ module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
+ exception=CRYPTOGRAPHY_IMP_ERR)
+ certificate = CertificateInfoCryptography(module)
+
+ result = certificate.get_info()
+ module.exit_json(**result)
+ except crypto_utils.OpenSSLObjectError as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/openssl_csr.py b/test/support/integration/plugins/modules/openssl_csr.py
new file mode 100644
index 0000000000..ea2cf68c2a
--- /dev/null
+++ b/test/support/integration/plugins/modules/openssl_csr.py
@@ -0,0 +1,1159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: openssl_csr
+version_added: '2.4'
+short_description: Generate OpenSSL Certificate Signing Request (CSR)
+description:
+ - This module allows one to (re)generate OpenSSL certificate signing requests.
+ - It uses the pyOpenSSL python library to interact with openssl. This module supports
+ the subjectAltName, keyUsage, extendedKeyUsage, basicConstraints and OCSP Must Staple
+ extensions.
+ - "Please note that the module regenerates existing CSR if it doesn't match the module's
+ options, or if it seems to be corrupt. If you are concerned that this could overwrite
+ your existing CSR, consider using the I(backup) option."
+ - The module can use the cryptography Python library, or the pyOpenSSL Python
+ library. By default, it tries to detect which one is available. This can be
+ overridden with the I(select_crypto_backend) option. Please note that the
+ PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13."
+requirements:
+ - Either cryptography >= 1.3
+ - Or pyOpenSSL >= 0.15
+author:
+- Yanis Guenane (@Spredzy)
+options:
+ state:
+ description:
+ - Whether the certificate signing request should exist or not, taking action if the state is different from what is stated.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ digest:
+ description:
+ - The digest used when signing the certificate signing request with the private key.
+ type: str
+ default: sha256
+ privatekey_path:
+ description:
+ - The path to the private key to use when signing the certificate signing request.
+ - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+ type: path
+ privatekey_content:
+ description:
+ - The content of the private key to use when signing the certificate signing request.
+ - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+ type: str
+ version_added: "2.10"
+ privatekey_passphrase:
+ description:
+ - The passphrase for the private key.
+ - This is required if the private key is password protected.
+ type: str
+ version:
+ description:
+ - The version of the certificate signing request.
+ - "The only allowed value according to L(RFC 2986,https://tools.ietf.org/html/rfc2986#section-4.1)
+ is 1."
+ - This option will no longer accept unsupported values from Ansible 2.14 on.
+ type: int
+ default: 1
+ force:
+ description:
+ - Should the certificate signing request be forcefully regenerated by this Ansible module.
+ type: bool
+ default: no
+ path:
+ description:
+ - The name of the file into which the generated OpenSSL certificate signing request will be written.
+ type: path
+ required: true
+ subject:
+ description:
+ - Key/value pairs that will be present in the subject name field of the certificate signing request.
+ - If you need to specify more than one value with the same key, use a list as value.
+ type: dict
+ version_added: '2.5'
+ country_name:
+ description:
+ - The countryName field of the certificate signing request subject.
+ type: str
+ aliases: [ C, countryName ]
+ state_or_province_name:
+ description:
+ - The stateOrProvinceName field of the certificate signing request subject.
+ type: str
+ aliases: [ ST, stateOrProvinceName ]
+ locality_name:
+ description:
+ - The localityName field of the certificate signing request subject.
+ type: str
+ aliases: [ L, localityName ]
+ organization_name:
+ description:
+ - The organizationName field of the certificate signing request subject.
+ type: str
+ aliases: [ O, organizationName ]
+ organizational_unit_name:
+ description:
+ - The organizationalUnitName field of the certificate signing request subject.
+ type: str
+ aliases: [ OU, organizationalUnitName ]
+ common_name:
+ description:
+ - The commonName field of the certificate signing request subject.
+ type: str
+ aliases: [ CN, commonName ]
+ email_address:
+ description:
+ - The emailAddress field of the certificate signing request subject.
+ type: str
+ aliases: [ E, emailAddress ]
+ subject_alt_name:
+ description:
+ - SAN extension to attach to the certificate signing request.
+ - This can either be a comma-separated string or a YAML list.
+ - Values must be prefixed by their options (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
+ C(otherName) and the ones specific to your CA).
+ - Note that if no SAN is specified, but a common name is, the common
+ name will be added as a SAN, unless C(useCommonNameForSAN) is
+ set to I(false).
+ - More at U(https://tools.ietf.org/html/rfc5280#section-4.2.1.6).
+ type: list
+ elements: str
+ aliases: [ subjectAltName ]
+ subject_alt_name_critical:
+ description:
+ - Should the subjectAltName extension be considered as critical.
+ type: bool
+ aliases: [ subjectAltName_critical ]
+ use_common_name_for_san:
+ description:
+ - If set to C(yes), the module will fill the common name in for
+ C(subject_alt_name) with C(DNS:) prefix if no SAN is specified.
+ type: bool
+ default: yes
+ version_added: '2.8'
+ aliases: [ useCommonNameForSAN ]
+ key_usage:
+ description:
+ - This defines the purpose (e.g. encipherment, signature, certificate signing)
+ of the key contained in the certificate.
+ type: list
+ elements: str
+ aliases: [ keyUsage ]
+ key_usage_critical:
+ description:
+ - Should the keyUsage extension be considered as critical.
+ type: bool
+ aliases: [ keyUsage_critical ]
+ extended_key_usage:
+ description:
+ - Additional restrictions (e.g. client authentication, server authentication)
+ on the allowed purposes for which the public key may be used.
+ type: list
+ elements: str
+ aliases: [ extKeyUsage, extendedKeyUsage ]
+ extended_key_usage_critical:
+ description:
+ - Should the extendedKeyUsage extension be considered as critical.
+ type: bool
+ aliases: [ extKeyUsage_critical, extendedKeyUsage_critical ]
+ basic_constraints:
+ description:
+ - Indicates basic constraints, such as if the certificate is a CA.
+ type: list
+ elements: str
+ version_added: '2.5'
+ aliases: [ basicConstraints ]
+ basic_constraints_critical:
+ description:
+ - Should the basicConstraints extension be considered as critical.
+ type: bool
+ version_added: '2.5'
+ aliases: [ basicConstraints_critical ]
+ ocsp_must_staple:
+ description:
+ - Indicates that the certificate should contain the OCSP Must Staple
+ extension (U(https://tools.ietf.org/html/rfc7633)).
+ type: bool
+ version_added: '2.5'
+ aliases: [ ocspMustStaple ]
+ ocsp_must_staple_critical:
+ description:
+ - Should the OCSP Must Staple extension be considered as critical.
+ - Note that according to the RFC, this extension should not be marked
+ as critical, as old clients not knowing about OCSP Must Staple
+ are required to reject such certificates
+ (see U(https://tools.ietf.org/html/rfc7633#section-4)).
+ type: bool
+ version_added: '2.5'
+ aliases: [ ocspMustStaple_critical ]
+ select_crypto_backend:
+ description:
+ - Determines which crypto backend to use.
+ - The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
+ - If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
+ - If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
+ - Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in Ansible 2.13.
+ From that point on, only the C(cryptography) backend will be available.
+ type: str
+ default: auto
+ choices: [ auto, cryptography, pyopenssl ]
+ version_added: '2.8'
+ backup:
+ description:
+ - Create a backup file including a timestamp so you can get the original
+ CSR back if you overwrote it with a new one by accident.
+ type: bool
+ default: no
+ version_added: "2.8"
+ create_subject_key_identifier:
+ description:
+ - Create the Subject Key Identifier from the public key.
+ - "Please note that commercial CAs can ignore the value, respectively use a value of
+ their own choice instead. Specifying this option is mostly useful for self-signed
+ certificates or for own CAs."
+ - Note that this is only supported if the C(cryptography) backend is used!
+ type: bool
+ default: no
+ version_added: "2.9"
+ subject_key_identifier:
+ description:
+ - The subject key identifier as a hex string, where two bytes are separated by colons.
+ - "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
+ - "Please note that commercial CAs ignore this value, respectively use a value of their
+ own choice. Specifying this option is mostly useful for self-signed certificates
+ or for own CAs."
+ - Note that this option can only be used if I(create_subject_key_identifier) is C(no).
+ - Note that this is only supported if the C(cryptography) backend is used!
+ type: str
+ version_added: "2.9"
+ authority_key_identifier:
+ description:
+ - The authority key identifier as a hex string, where two bytes are separated by colons.
+ - "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
+ - If specified, I(authority_cert_issuer) must also be specified.
+ - "Please note that commercial CAs ignore this value, respectively use a value of their
+ own choice. Specifying this option is mostly useful for self-signed certificates
+ or for own CAs."
+ - Note that this is only supported if the C(cryptography) backend is used!
+ - The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
+ I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
+ type: str
+ version_added: "2.9"
+ authority_cert_issuer:
+ description:
+ - Names that will be present in the authority cert issuer field of the certificate signing request.
+ - Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
+ C(otherName) and the ones specific to your CA)
+ - "Example: C(DNS:ca.example.org)"
+ - If specified, I(authority_key_identifier) must also be specified.
+ - "Please note that commercial CAs ignore this value, respectively use a value of their
+ own choice. Specifying this option is mostly useful for self-signed certificates
+ or for own CAs."
+ - Note that this is only supported if the C(cryptography) backend is used!
+ - The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
+ I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
+ type: list
+ elements: str
+ version_added: "2.9"
+ authority_cert_serial_number:
+ description:
+ - The authority cert serial number.
+ - Note that this is only supported if the C(cryptography) backend is used!
+ - "Please note that commercial CAs ignore this value, respectively use a value of their
+ own choice. Specifying this option is mostly useful for self-signed certificates
+ or for own CAs."
+ - The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
+ I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
+ type: int
+ version_added: "2.9"
+ return_content:
+ description:
+ - If set to C(yes), will return the (current or generated) CSR's content as I(csr).
+ type: bool
+ default: no
+ version_added: "2.10"
+extends_documentation_fragment:
+- files
+notes:
+ - If the certificate signing request already exists, it will be checked whether subjectAltName,
+ keyUsage, extendedKeyUsage and basicConstraints only contain the requested values, whether
+ OCSP Must Staple is as requested, and if the request was signed by the given private key.
+seealso:
+- module: openssl_certificate
+- module: openssl_dhparam
+- module: openssl_pkcs12
+- module: openssl_privatekey
+- module: openssl_publickey
+'''
+
+EXAMPLES = r'''
+- name: Generate an OpenSSL Certificate Signing Request
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ common_name: www.ansible.com
+
+- name: Generate an OpenSSL Certificate Signing Request with an inline key
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_content: "{{ private_key_content }}"
+ common_name: www.ansible.com
+
+- name: Generate an OpenSSL Certificate Signing Request with a passphrase protected private key
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ privatekey_passphrase: ansible
+ common_name: www.ansible.com
+
+- name: Generate an OpenSSL Certificate Signing Request with Subject information
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ country_name: FR
+ organization_name: Ansible
+ email_address: jdoe@ansible.com
+ common_name: www.ansible.com
+
+- name: Generate an OpenSSL Certificate Signing Request with subjectAltName extension
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ subject_alt_name: 'DNS:www.ansible.com,DNS:m.ansible.com'
+
+- name: Generate an OpenSSL CSR with subjectAltName extension with dynamic list
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ subject_alt_name: "{{ item.value | map('regex_replace', '^', 'DNS:') | list }}"
+ with_dict:
+ dns_server:
+ - www.ansible.com
+ - m.ansible.com
+
+- name: Force regenerate an OpenSSL Certificate Signing Request
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ force: yes
+ common_name: www.ansible.com
+
+- name: Generate an OpenSSL Certificate Signing Request with special key usages
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ common_name: www.ansible.com
+ key_usage:
+ - digitalSignature
+ - keyAgreement
+ extended_key_usage:
+ - clientAuth
+
+- name: Generate an OpenSSL Certificate Signing Request with OCSP Must Staple
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ common_name: www.ansible.com
+ ocsp_must_staple: yes
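+
+# The following two tasks are a minimal sketch of the key identifier options
+# documented above (all values are illustrative); they require the cryptography backend.
+- name: Generate an OpenSSL Certificate Signing Request with a derived subject key identifier
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ common_name: www.ansible.com
+ create_subject_key_identifier: yes
+
+- name: Generate an OpenSSL Certificate Signing Request with an authority key identifier
+ openssl_csr:
+ path: /etc/ssl/csr/www.ansible.com.csr
+ privatekey_path: /etc/ssl/private/ansible.com.pem
+ common_name: www.ansible.com
+ authority_key_identifier: '00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33'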
+'''
+
+RETURN = r'''
+privatekey:
+ description:
+ - Path to the TLS/SSL private key the CSR was generated for.
+ - Will be C(none) if the private key has been provided in I(privatekey_content).
+ returned: changed or success
+ type: str
+ sample: /etc/ssl/private/ansible.com.pem
+filename:
+ description: Path to the generated Certificate Signing Request
+ returned: changed or success
+ type: str
+ sample: /etc/ssl/csr/www.ansible.com.csr
+subject:
+ description: A list of the subject tuples attached to the CSR
+ returned: changed or success
+ type: list
+ elements: list
+ sample: "[('CN', 'www.ansible.com'), ('O', 'Ansible')]"
+subjectAltName:
+ description: The alternative names this CSR is valid for
+ returned: changed or success
+ type: list
+ elements: str
+ sample: [ 'DNS:www.ansible.com', 'DNS:m.ansible.com' ]
+keyUsage:
+ description: Purpose for which the public key may be used
+ returned: changed or success
+ type: list
+ elements: str
+ sample: [ 'digitalSignature', 'keyAgreement' ]
+extendedKeyUsage:
+ description: Additional restriction on the public key purposes
+ returned: changed or success
+ type: list
+ elements: str
+ sample: [ 'clientAuth' ]
+basicConstraints:
+ description: Indicates if the certificate belongs to a CA
+ returned: changed or success
+ type: list
+ elements: str
+ sample: ['CA:TRUE', 'pathLenConstraint:0']
+ocsp_must_staple:
+ description: Indicates whether the certificate has the OCSP
+ Must Staple feature enabled
+ returned: changed or success
+ type: bool
+ sample: false
+backup_file:
+ description: Name of backup file created.
+ returned: changed and if I(backup) is C(yes)
+ type: str
+ sample: /path/to/www.ansible.com.csr.2019-03-09@11:22~
+csr:
+ description: The (current or generated) CSR's content.
+ returned: if I(state) is C(present) and I(return_content) is C(yes)
+ type: str
+ version_added: "2.10"
+'''
+
+import abc
+import binascii
+import os
+import traceback
+from distutils.version import LooseVersion
+
+from ansible.module_utils import crypto as crypto_utils
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes, to_text
+from ansible.module_utils.compat import ipaddress as compat_ipaddress
+
+MINIMAL_PYOPENSSL_VERSION = '0.15'
+MINIMAL_CRYPTOGRAPHY_VERSION = '1.3'
+
+PYOPENSSL_IMP_ERR = None
+try:
+ import OpenSSL
+ from OpenSSL import crypto
+ PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
+except ImportError:
+ PYOPENSSL_IMP_ERR = traceback.format_exc()
+ PYOPENSSL_FOUND = False
+else:
+ PYOPENSSL_FOUND = True
+ if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
+ # OpenSSL 1.1.0 or newer
+ OPENSSL_MUST_STAPLE_NAME = b"tlsfeature"
+ OPENSSL_MUST_STAPLE_VALUE = b"status_request"
+ else:
+ # OpenSSL 1.0.x or older
+ OPENSSL_MUST_STAPLE_NAME = b"1.3.6.1.5.5.7.1.24"
+ OPENSSL_MUST_STAPLE_VALUE = b"DER:30:03:02:01:05"
+
+CRYPTOGRAPHY_IMP_ERR = None
+try:
+ import cryptography
+ import cryptography.x509
+ import cryptography.x509.oid
+ import cryptography.exceptions
+ import cryptography.hazmat.backends
+ import cryptography.hazmat.primitives.serialization
+ import cryptography.hazmat.primitives.hashes
+ CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
+except ImportError:
+ CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
+ CRYPTOGRAPHY_FOUND = False
+else:
+ CRYPTOGRAPHY_FOUND = True
+ CRYPTOGRAPHY_MUST_STAPLE_NAME = cryptography.x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.1.24")
+ CRYPTOGRAPHY_MUST_STAPLE_VALUE = b"\x30\x03\x02\x01\x05"
+
+
+class CertificateSigningRequestError(crypto_utils.OpenSSLObjectError):
+ pass
+
+
+class CertificateSigningRequestBase(crypto_utils.OpenSSLObject):
+
+ def __init__(self, module):
+ super(CertificateSigningRequestBase, self).__init__(
+ module.params['path'],
+ module.params['state'],
+ module.params['force'],
+ module.check_mode
+ )
+ self.digest = module.params['digest']
+ self.privatekey_path = module.params['privatekey_path']
+ self.privatekey_content = module.params['privatekey_content']
+ if self.privatekey_content is not None:
+ self.privatekey_content = self.privatekey_content.encode('utf-8')
+ self.privatekey_passphrase = module.params['privatekey_passphrase']
+ self.version = module.params['version']
+ self.subjectAltName = module.params['subject_alt_name']
+ self.subjectAltName_critical = module.params['subject_alt_name_critical']
+ self.keyUsage = module.params['key_usage']
+ self.keyUsage_critical = module.params['key_usage_critical']
+ self.extendedKeyUsage = module.params['extended_key_usage']
+ self.extendedKeyUsage_critical = module.params['extended_key_usage_critical']
+ self.basicConstraints = module.params['basic_constraints']
+ self.basicConstraints_critical = module.params['basic_constraints_critical']
+ self.ocspMustStaple = module.params['ocsp_must_staple']
+ self.ocspMustStaple_critical = module.params['ocsp_must_staple_critical']
+ self.create_subject_key_identifier = module.params['create_subject_key_identifier']
+ self.subject_key_identifier = module.params['subject_key_identifier']
+ self.authority_key_identifier = module.params['authority_key_identifier']
+ self.authority_cert_issuer = module.params['authority_cert_issuer']
+ self.authority_cert_serial_number = module.params['authority_cert_serial_number']
+ self.request = None
+ self.privatekey = None
+ self.csr_bytes = None
+ self.return_content = module.params['return_content']
+
+ if self.create_subject_key_identifier and self.subject_key_identifier is not None:
+ module.fail_json(msg='subject_key_identifier cannot be specified if create_subject_key_identifier is true')
+
+ self.backup = module.params['backup']
+ self.backup_file = None
+
+ self.subject = [
+ ('C', module.params['country_name']),
+ ('ST', module.params['state_or_province_name']),
+ ('L', module.params['locality_name']),
+ ('O', module.params['organization_name']),
+ ('OU', module.params['organizational_unit_name']),
+ ('CN', module.params['common_name']),
+ ('emailAddress', module.params['email_address']),
+ ]
+
+ if module.params['subject']:
+ self.subject = self.subject + crypto_utils.parse_name_field(module.params['subject'])
+ self.subject = [(entry[0], entry[1]) for entry in self.subject if entry[1]]
+
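+ # If no subjectAltName was given and use_common_name_for_san is set, fall back
+ # to the common name as a single DNS SAN.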
+ if not self.subjectAltName and module.params['use_common_name_for_san']:
+ for sub in self.subject:
+ if sub[0] in ('commonName', 'CN'):
+ self.subjectAltName = ['DNS:%s' % sub[1]]
+ break
+
+ if self.subject_key_identifier is not None:
+ try:
+ self.subject_key_identifier = binascii.unhexlify(self.subject_key_identifier.replace(':', ''))
+ except Exception as e:
+ raise CertificateSigningRequestError('Cannot parse subject_key_identifier: {0}'.format(e))
+
+ if self.authority_key_identifier is not None:
+ try:
+ self.authority_key_identifier = binascii.unhexlify(self.authority_key_identifier.replace(':', ''))
+ except Exception as e:
+ raise CertificateSigningRequestError('Cannot parse authority_key_identifier: {0}'.format(e))
+
+ @abc.abstractmethod
+ def _generate_csr(self):
+ pass
+
+ def generate(self, module):
+ '''Generate the certificate signing request.'''
+ if not self.check(module, perms_required=False) or self.force:
+ result = self._generate_csr()
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ if self.return_content:
+ self.csr_bytes = result
+ crypto_utils.write_file(module, result)
+ self.changed = True
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ @abc.abstractmethod
+ def _load_private_key(self):
+ pass
+
+ @abc.abstractmethod
+ def _check_csr(self):
+ pass
+
+ def check(self, module, perms_required=True):
+ """Ensure the resource is in its desired state."""
+ state_and_perms = super(CertificateSigningRequestBase, self).check(module, perms_required)
+
+ self._load_private_key()
+
+ if not state_and_perms:
+ return False
+
+ return self._check_csr()
+
+ def remove(self, module):
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ super(CertificateSigningRequestBase, self).remove(module)
+
+ def dump(self):
+ '''Serialize the object into a dictionary.'''
+
+ result = {
+ 'privatekey': self.privatekey_path,
+ 'filename': self.path,
+ 'subject': self.subject,
+ 'subjectAltName': self.subjectAltName,
+ 'keyUsage': self.keyUsage,
+ 'extendedKeyUsage': self.extendedKeyUsage,
+ 'basicConstraints': self.basicConstraints,
+ 'ocspMustStaple': self.ocspMustStaple,
+ 'changed': self.changed
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ if self.csr_bytes is None:
+ self.csr_bytes = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ result['csr'] = self.csr_bytes.decode('utf-8') if self.csr_bytes else None
+
+ return result
+
+
+class CertificateSigningRequestPyOpenSSL(CertificateSigningRequestBase):
+
+ def __init__(self, module):
+ if module.params['create_subject_key_identifier']:
+ module.fail_json(msg='You cannot use create_subject_key_identifier with the pyOpenSSL backend!')
+ for o in ('subject_key_identifier', 'authority_key_identifier', 'authority_cert_issuer', 'authority_cert_serial_number'):
+ if module.params[o] is not None:
+ module.fail_json(msg='You cannot use {0} with the pyOpenSSL backend!'.format(o))
+ super(CertificateSigningRequestPyOpenSSL, self).__init__(module)
+
+ def _generate_csr(self):
+ req = crypto.X509Req()
+ req.set_version(self.version - 1)
+ subject = req.get_subject()
+ for entry in self.subject:
+ if entry[1] is not None:
+ # Workaround for https://github.com/pyca/pyopenssl/issues/165
+ nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(entry[0]))
+ if nid == 0:
+ raise CertificateSigningRequestError('Unknown subject field identifier "{0}"'.format(entry[0]))
+ res = OpenSSL._util.lib.X509_NAME_add_entry_by_NID(subject._name, nid, OpenSSL._util.lib.MBSTRING_UTF8, to_bytes(entry[1]), -1, -1, 0)
+ if res == 0:
+ raise CertificateSigningRequestError('Invalid value for subject field identifier "{0}": {1}'.format(entry[0], entry[1]))
+
+ extensions = []
+ if self.subjectAltName:
+ altnames = ', '.join(self.subjectAltName)
+ try:
+ extensions.append(crypto.X509Extension(b"subjectAltName", self.subjectAltName_critical, altnames.encode('ascii')))
+ except OpenSSL.crypto.Error as e:
+ raise CertificateSigningRequestError(
+ 'Error while parsing Subject Alternative Names {0} (check for missing type prefix, such as "DNS:"!): {1}'.format(
+ ', '.join(["{0}".format(san) for san in self.subjectAltName]), str(e)
+ )
+ )
+
+ if self.keyUsage:
+ usages = ', '.join(self.keyUsage)
+ extensions.append(crypto.X509Extension(b"keyUsage", self.keyUsage_critical, usages.encode('ascii')))
+
+ if self.extendedKeyUsage:
+ usages = ', '.join(self.extendedKeyUsage)
+ extensions.append(crypto.X509Extension(b"extendedKeyUsage", self.extendedKeyUsage_critical, usages.encode('ascii')))
+
+ if self.basicConstraints:
+ usages = ', '.join(self.basicConstraints)
+ extensions.append(crypto.X509Extension(b"basicConstraints", self.basicConstraints_critical, usages.encode('ascii')))
+
+ if self.ocspMustStaple:
+ extensions.append(crypto.X509Extension(OPENSSL_MUST_STAPLE_NAME, self.ocspMustStaple_critical, OPENSSL_MUST_STAPLE_VALUE))
+
+ if extensions:
+ req.add_extensions(extensions)
+
+ req.set_pubkey(self.privatekey)
+ req.sign(self.privatekey, self.digest)
+ self.request = req
+
+ return crypto.dump_certificate_request(crypto.FILETYPE_PEM, self.request)
+
+ def _load_private_key(self):
+ try:
+ self.privatekey = crypto_utils.load_privatekey(
+ path=self.privatekey_path,
+ content=self.privatekey_content,
+ passphrase=self.privatekey_passphrase
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ raise CertificateSigningRequestError(exc)
+
+ def _normalize_san(self, san):
+ # Apparently OpenSSL returns 'IP address' not 'IP' as specifier when converting the subjectAltName to string
+ # although it won't accept this specifier when generating the CSR. (https://github.com/openssl/openssl/issues/4004)
+ if san.startswith('IP Address:'):
+ san = 'IP:' + san[len('IP Address:'):]
+ if san.startswith('IP:'):
+ ip = compat_ipaddress.ip_address(san[3:])
+ san = 'IP:{0}'.format(ip.compressed)
+ return san
+
+ def _check_csr(self):
+ def _check_subject(csr):
+ subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in self.subject]
+ current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in csr.get_subject().get_components()]
+ if not set(subject) == set(current_subject):
+ return False
+
+ return True
+
+ def _check_subjectAltName(extensions):
+ altnames_ext = next((ext for ext in extensions if ext.get_short_name() == b'subjectAltName'), '')
+ altnames = [self._normalize_san(altname.strip()) for altname in
+ to_text(altnames_ext, errors='surrogate_or_strict').split(',') if altname.strip()]
+ if self.subjectAltName:
+ if (set(altnames) != set([self._normalize_san(to_text(name)) for name in self.subjectAltName]) or
+ altnames_ext.get_critical() != self.subjectAltName_critical):
+ return False
+ else:
+ if altnames:
+ return False
+
+ return True
+
+ def _check_keyUsage_(extensions, extName, expected, critical):
+ usages_ext = [ext for ext in extensions if ext.get_short_name() == extName]
+ if (not usages_ext and expected) or (usages_ext and not expected):
+ return False
+ elif not usages_ext and not expected:
+ return True
+ else:
+ current = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage.strip())) for usage in str(usages_ext[0]).split(',')]
+ expected = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage)) for usage in expected]
+ return set(current) == set(expected) and usages_ext[0].get_critical() == critical
+
+ def _check_keyUsage(extensions):
+ usages_ext = [ext for ext in extensions if ext.get_short_name() == b'keyUsage']
+ if (not usages_ext and self.keyUsage) or (usages_ext and not self.keyUsage):
+ return False
+ elif not usages_ext and not self.keyUsage:
+ return True
+ else:
+ # OpenSSL._util.lib.OBJ_txt2nid() always returns 0 for all keyUsage values
+ # (since keyUsage has a fixed bitfield for these values and is not extensible).
+ # Therefore, we create an extension for the wanted values, and compare the
+ # data of the extensions (which is the serialized bitfield).
+ expected_ext = crypto.X509Extension(b"keyUsage", False, ', '.join(self.keyUsage).encode('ascii'))
+ return usages_ext[0].get_data() == expected_ext.get_data() and usages_ext[0].get_critical() == self.keyUsage_critical
+
+ def _check_extendedKeyUsage(extensions):
+ return _check_keyUsage_(extensions, b'extendedKeyUsage', self.extendedKeyUsage, self.extendedKeyUsage_critical)
+
+ def _check_basicConstraints(extensions):
+ return _check_keyUsage_(extensions, b'basicConstraints', self.basicConstraints, self.basicConstraints_critical)
+
+ def _check_ocspMustStaple(extensions):
+ oms_ext = [ext for ext in extensions if to_bytes(ext.get_short_name()) == OPENSSL_MUST_STAPLE_NAME and to_bytes(ext) == OPENSSL_MUST_STAPLE_VALUE]
+ if OpenSSL.SSL.OPENSSL_VERSION_NUMBER < 0x10100000:
+ # Older versions of libssl don't know about OCSP Must Staple
+ oms_ext.extend([ext for ext in extensions if ext.get_short_name() == b'UNDEF' and ext.get_data() == b'\x30\x03\x02\x01\x05'])
+ if self.ocspMustStaple:
+ return len(oms_ext) > 0 and oms_ext[0].get_critical() == self.ocspMustStaple_critical
+ else:
+ return len(oms_ext) == 0
+
+ def _check_extensions(csr):
+ extensions = csr.get_extensions()
+ return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
+ _check_extendedKeyUsage(extensions) and _check_basicConstraints(extensions) and
+ _check_ocspMustStaple(extensions))
+
+ def _check_signature(csr):
+ try:
+ return csr.verify(self.privatekey)
+ except crypto.Error:
+ return False
+
+ try:
+ csr = crypto_utils.load_certificate_request(self.path, backend='pyopenssl')
+ except Exception as dummy:
+ return False
+
+ return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
+
+
+class CertificateSigningRequestCryptography(CertificateSigningRequestBase):
+
+ def __init__(self, module):
+ super(CertificateSigningRequestCryptography, self).__init__(module)
+ self.cryptography_backend = cryptography.hazmat.backends.default_backend()
+ self.module = module
+ if self.version != 1:
+ module.warn('The cryptography backend only supports version 1. (The only valid value according to RFC 2986.)')
+
+ def _generate_csr(self):
+ csr = cryptography.x509.CertificateSigningRequestBuilder()
+ try:
+ csr = csr.subject_name(cryptography.x509.Name([
+ cryptography.x509.NameAttribute(crypto_utils.cryptography_name_to_oid(entry[0]), to_text(entry[1])) for entry in self.subject
+ ]))
+ except ValueError as e:
+ raise CertificateSigningRequestError(e)
+
+ if self.subjectAltName:
+ csr = csr.add_extension(cryptography.x509.SubjectAlternativeName([
+ crypto_utils.cryptography_get_name(name) for name in self.subjectAltName
+ ]), critical=self.subjectAltName_critical)
+
+ if self.keyUsage:
+ params = crypto_utils.cryptography_parse_key_usage_params(self.keyUsage)
+ csr = csr.add_extension(cryptography.x509.KeyUsage(**params), critical=self.keyUsage_critical)
+
+ if self.extendedKeyUsage:
+ usages = [crypto_utils.cryptography_name_to_oid(usage) for usage in self.extendedKeyUsage]
+ csr = csr.add_extension(cryptography.x509.ExtendedKeyUsage(usages), critical=self.extendedKeyUsage_critical)
+
+ if self.basicConstraints:
+ params = {}
+ ca, path_length = crypto_utils.cryptography_get_basic_constraints(self.basicConstraints)
+ csr = csr.add_extension(cryptography.x509.BasicConstraints(ca, path_length), critical=self.basicConstraints_critical)
+
+ if self.ocspMustStaple:
+ try:
+ # This only works with cryptography >= 2.1
+ csr = csr.add_extension(cryptography.x509.TLSFeature([cryptography.x509.TLSFeatureType.status_request]), critical=self.ocspMustStaple_critical)
+ except AttributeError as dummy:
+ csr = csr.add_extension(
+ cryptography.x509.UnrecognizedExtension(CRYPTOGRAPHY_MUST_STAPLE_NAME, CRYPTOGRAPHY_MUST_STAPLE_VALUE),
+ critical=self.ocspMustStaple_critical
+ )
+
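+ # The SubjectKeyIdentifier extension is always added as non-critical: either
+ # derived from the public key, or taken from the user-supplied hex string.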
+ if self.create_subject_key_identifier:
+ csr = csr.add_extension(
+ cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
+ critical=False
+ )
+ elif self.subject_key_identifier is not None:
+ csr = csr.add_extension(cryptography.x509.SubjectKeyIdentifier(self.subject_key_identifier), critical=False)
+
+ if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
+ issuers = None
+ if self.authority_cert_issuer is not None:
+ issuers = [crypto_utils.cryptography_get_name(n) for n in self.authority_cert_issuer]
+ csr = csr.add_extension(
+ cryptography.x509.AuthorityKeyIdentifier(self.authority_key_identifier, issuers, self.authority_cert_serial_number),
+ critical=False
+ )
+
+ digest = None
+ if crypto_utils.cryptography_key_needs_digest_for_signing(self.privatekey):
+ if self.digest == 'sha256':
+ digest = cryptography.hazmat.primitives.hashes.SHA256()
+ elif self.digest == 'sha384':
+ digest = cryptography.hazmat.primitives.hashes.SHA384()
+ elif self.digest == 'sha512':
+ digest = cryptography.hazmat.primitives.hashes.SHA512()
+ elif self.digest == 'sha1':
+ digest = cryptography.hazmat.primitives.hashes.SHA1()
+ elif self.digest == 'md5':
+ digest = cryptography.hazmat.primitives.hashes.MD5()
+ # FIXME
+ else:
+ raise CertificateSigningRequestError('Unsupported digest "{0}"'.format(self.digest))
+ try:
+ self.request = csr.sign(self.privatekey, digest, self.cryptography_backend)
+ except TypeError as e:
+ if str(e) == 'Algorithm must be a registered hash algorithm.' and digest is None:
+ self.module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
+ raise
+
+ return self.request.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM)
+
+ def _load_private_key(self):
+ try:
+ if self.privatekey_content is not None:
+ content = self.privatekey_content
+ else:
+ with open(self.privatekey_path, 'rb') as f:
+ content = f.read()
+ self.privatekey = cryptography.hazmat.primitives.serialization.load_pem_private_key(
+ content,
+ None if self.privatekey_passphrase is None else to_bytes(self.privatekey_passphrase),
+ backend=self.cryptography_backend
+ )
+ except Exception as e:
+ raise CertificateSigningRequestError(e)
+
+ def _check_csr(self):
+ def _check_subject(csr):
+ subject = [(crypto_utils.cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.subject]
+ current_subject = [(sub.oid, sub.value) for sub in csr.subject]
+ return set(subject) == set(current_subject)
+
+ def _find_extension(extensions, exttype):
+ return next(
+ (ext for ext in extensions if isinstance(ext.value, exttype)),
+ None
+ )
+
+ def _check_subjectAltName(extensions):
+ current_altnames_ext = _find_extension(extensions, cryptography.x509.SubjectAlternativeName)
+ current_altnames = [str(altname) for altname in current_altnames_ext.value] if current_altnames_ext else []
+ altnames = [str(crypto_utils.cryptography_get_name(altname)) for altname in self.subjectAltName] if self.subjectAltName else []
+ if set(altnames) != set(current_altnames):
+ return False
+ if altnames:
+ if current_altnames_ext.critical != self.subjectAltName_critical:
+ return False
+ return True
+
+ def _check_keyUsage(extensions):
+ current_keyusage_ext = _find_extension(extensions, cryptography.x509.KeyUsage)
+ if not self.keyUsage:
+ return current_keyusage_ext is None
+ elif current_keyusage_ext is None:
+ return False
+ params = crypto_utils.cryptography_parse_key_usage_params(self.keyUsage)
+ for param in params:
+ if getattr(current_keyusage_ext.value, '_' + param) != params[param]:
+ return False
+ if current_keyusage_ext.critical != self.keyUsage_critical:
+ return False
+ return True
+
+ def _check_extendedKeyUsage(extensions):
+ current_usages_ext = _find_extension(extensions, cryptography.x509.ExtendedKeyUsage)
+ current_usages = [str(usage) for usage in current_usages_ext.value] if current_usages_ext else []
+ usages = [str(crypto_utils.cryptography_name_to_oid(usage)) for usage in self.extendedKeyUsage] if self.extendedKeyUsage else []
+ if set(current_usages) != set(usages):
+ return False
+ if usages:
+ if current_usages_ext.critical != self.extendedKeyUsage_critical:
+ return False
+ return True
+
+ def _check_basicConstraints(extensions):
+ bc_ext = _find_extension(extensions, cryptography.x509.BasicConstraints)
+ current_ca = bc_ext.value.ca if bc_ext else False
+ current_path_length = bc_ext.value.path_length if bc_ext else None
+ ca, path_length = crypto_utils.cryptography_get_basic_constraints(self.basicConstraints)
+ # Check CA flag
+ if ca != current_ca:
+ return False
+ # Check path length
+ if path_length != current_path_length:
+ return False
+ # Check criticality
+ if self.basicConstraints:
+ if bc_ext.critical != self.basicConstraints_critical:
+ return False
+ return True
+
+ def _check_ocspMustStaple(extensions):
+ try:
+ # This only works with cryptography >= 2.1
+ tlsfeature_ext = _find_extension(extensions, cryptography.x509.TLSFeature)
+ has_tlsfeature = True
+ except AttributeError as dummy:
+ tlsfeature_ext = next(
+ (ext for ext in extensions if ext.value.oid == CRYPTOGRAPHY_MUST_STAPLE_NAME),
+ None
+ )
+ has_tlsfeature = False
+ if self.ocspMustStaple:
+ if not tlsfeature_ext or tlsfeature_ext.critical != self.ocspMustStaple_critical:
+ return False
+ if has_tlsfeature:
+ return cryptography.x509.TLSFeatureType.status_request in tlsfeature_ext.value
+ else:
+ return tlsfeature_ext.value.value == CRYPTOGRAPHY_MUST_STAPLE_VALUE
+ else:
+ return tlsfeature_ext is None
+
+ def _check_subject_key_identifier(extensions):
+ ext = _find_extension(extensions, cryptography.x509.SubjectKeyIdentifier)
+ if self.create_subject_key_identifier or self.subject_key_identifier is not None:
+ if not ext or ext.critical:
+ return False
+ if self.create_subject_key_identifier:
+ digest = cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()).digest
+ return ext.value.digest == digest
+ else:
+ return ext.value.digest == self.subject_key_identifier
+ else:
+ return ext is None
+
+ def _check_authority_key_identifier(extensions):
+ ext = _find_extension(extensions, cryptography.x509.AuthorityKeyIdentifier)
+ if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
+ if not ext or ext.critical:
+ return False
+ aci = None
+ csr_aci = None
+ if self.authority_cert_issuer is not None:
+ aci = [str(crypto_utils.cryptography_get_name(n)) for n in self.authority_cert_issuer]
+ if ext.value.authority_cert_issuer is not None:
+ csr_aci = [str(n) for n in ext.value.authority_cert_issuer]
+ return (ext.value.key_identifier == self.authority_key_identifier
+ and csr_aci == aci
+ and ext.value.authority_cert_serial_number == self.authority_cert_serial_number)
+ else:
+ return ext is None
+
+ def _check_extensions(csr):
+ extensions = csr.extensions
+ return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
+ _check_extendedKeyUsage(extensions) and _check_basicConstraints(extensions) and
+ _check_ocspMustStaple(extensions) and _check_subject_key_identifier(extensions) and
+ _check_authority_key_identifier(extensions))
+
+ def _check_signature(csr):
+ if not csr.is_signature_valid:
+ return False
+ # To check whether public key of CSR belongs to private key,
+ # encode both public keys and compare PEMs.
+ key_a = csr.public_key().public_bytes(
+ cryptography.hazmat.primitives.serialization.Encoding.PEM,
+ cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
+ )
+ key_b = self.privatekey.public_key().public_bytes(
+ cryptography.hazmat.primitives.serialization.Encoding.PEM,
+ cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
+ )
+ return key_a == key_b
+
+ try:
+ csr = crypto_utils.load_certificate_request(self.path, backend='cryptography')
+ except Exception as dummy:
+ return False
+
+ return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ digest=dict(type='str', default='sha256'),
+ privatekey_path=dict(type='path'),
+ privatekey_content=dict(type='str'),
+ privatekey_passphrase=dict(type='str', no_log=True),
+ version=dict(type='int', default=1),
+ force=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ subject=dict(type='dict'),
+ country_name=dict(type='str', aliases=['C', 'countryName']),
+ state_or_province_name=dict(type='str', aliases=['ST', 'stateOrProvinceName']),
+ locality_name=dict(type='str', aliases=['L', 'localityName']),
+ organization_name=dict(type='str', aliases=['O', 'organizationName']),
+ organizational_unit_name=dict(type='str', aliases=['OU', 'organizationalUnitName']),
+ common_name=dict(type='str', aliases=['CN', 'commonName']),
+ email_address=dict(type='str', aliases=['E', 'emailAddress']),
+ subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName']),
+ subject_alt_name_critical=dict(type='bool', default=False, aliases=['subjectAltName_critical']),
+ use_common_name_for_san=dict(type='bool', default=True, aliases=['useCommonNameForSAN']),
+ key_usage=dict(type='list', elements='str', aliases=['keyUsage']),
+ key_usage_critical=dict(type='bool', default=False, aliases=['keyUsage_critical']),
+ extended_key_usage=dict(type='list', elements='str', aliases=['extKeyUsage', 'extendedKeyUsage']),
+ extended_key_usage_critical=dict(type='bool', default=False, aliases=['extKeyUsage_critical', 'extendedKeyUsage_critical']),
+ basic_constraints=dict(type='list', elements='str', aliases=['basicConstraints']),
+ basic_constraints_critical=dict(type='bool', default=False, aliases=['basicConstraints_critical']),
+ ocsp_must_staple=dict(type='bool', default=False, aliases=['ocspMustStaple']),
+ ocsp_must_staple_critical=dict(type='bool', default=False, aliases=['ocspMustStaple_critical']),
+ backup=dict(type='bool', default=False),
+ create_subject_key_identifier=dict(type='bool', default=False),
+ subject_key_identifier=dict(type='str'),
+ authority_key_identifier=dict(type='str'),
+ authority_cert_issuer=dict(type='list', elements='str'),
+ authority_cert_serial_number=dict(type='int'),
+ select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
+ return_content=dict(type='bool', default=False),
+ ),
+ required_together=[('authority_cert_issuer', 'authority_cert_serial_number')],
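+ # The trailing True makes this a one-of condition: state=present requires
+ # privatekey_path or privatekey_content (their exclusivity is enforced below).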
+ required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
+ mutually_exclusive=(
+ ['privatekey_path', 'privatekey_content'],
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ if module.params['version'] != 1:
+ module.deprecate('The version option will only support allowed values from Ansible 2.14 on. '
+ 'Currently, only the value 1 is allowed by RFC 2986', version='2.14')
+
+ base_dir = os.path.dirname(module.params['path']) or '.'
+ if not os.path.isdir(base_dir):
+ module.fail_json(name=base_dir, msg='The directory %s does not exist or is not a directory' % base_dir)
+
+ backend = module.params['select_crypto_backend']
+ if backend == 'auto':
+ # Detect what is possible
+ can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
+ can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
+
+ # First try cryptography, then pyOpenSSL
+ if can_use_cryptography:
+ backend = 'cryptography'
+ elif can_use_pyopenssl:
+ backend = 'pyopenssl'
+
+ # Success?
+ if backend == 'auto':
+ module.fail_json(msg=("Can't detect any of the required Python libraries "
+ "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
+ MINIMAL_CRYPTOGRAPHY_VERSION,
+ MINIMAL_PYOPENSSL_VERSION))
+ try:
+ if backend == 'pyopenssl':
+ if not PYOPENSSL_FOUND:
+ module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
+ exception=PYOPENSSL_IMP_ERR)
+ try:
+ getattr(crypto.X509Req, 'get_extensions')
+ except AttributeError:
+ module.fail_json(msg='You need to have PyOpenSSL>=0.15 to generate CSRs')
+
+ module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated', version='2.13')
+ csr = CertificateSigningRequestPyOpenSSL(module)
+ elif backend == 'cryptography':
+ if not CRYPTOGRAPHY_FOUND:
+ module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
+ exception=CRYPTOGRAPHY_IMP_ERR)
+ csr = CertificateSigningRequestCryptography(module)
+
+ if module.params['state'] == 'present':
+ if module.check_mode:
+ result = csr.dump()
+ result['changed'] = module.params['force'] or not csr.check(module)
+ module.exit_json(**result)
+
+ csr.generate(module)
+
+ else:
+ if module.check_mode:
+ result = csr.dump()
+ result['changed'] = os.path.exists(module.params['path'])
+ module.exit_json(**result)
+
+ csr.remove(module)
+
+ result = csr.dump()
+ module.exit_json(**result)
+ except crypto_utils.OpenSSLObjectError as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/openssl_privatekey.py b/test/support/integration/plugins/modules/openssl_privatekey.py
new file mode 100644
index 0000000000..2fdfdab10c
--- /dev/null
+++ b/test/support/integration/plugins/modules/openssl_privatekey.py
@@ -0,0 +1,943 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: openssl_privatekey
+version_added: "2.3"
+short_description: Generate OpenSSL private keys
+description:
+ - This module allows one to (re)generate OpenSSL private keys.
+ - One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29),
+ L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
+ L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
+ L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
+ - Keys are generated in PEM format.
+ - "Please note that the module regenerates private keys if they don't match
+ the module's options. In particular, if you provide another passphrase
+ (or specify none), change the keysize, etc., the private key will be
+ regenerated. If you are concerned that this could **overwrite your private key**,
+ consider using the I(backup) option."
+ - The module can use the cryptography Python library, or the pyOpenSSL Python
+ library. By default, it tries to detect which one is available. This can be
+ overridden with the I(select_crypto_backend) option. Please note that the
+ PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13.
+requirements:
+ - Either cryptography >= 1.2.3 (older versions might work as well)
+ - Or pyOpenSSL
+author:
+ - Yanis Guenane (@Spredzy)
+ - Felix Fontein (@felixfontein)
+options:
+ state:
+ description:
+ - Whether the private key should exist or not, taking action if the state is different from what is stated.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ size:
+ description:
+ - Size (in bits) of the TLS/SSL key to generate.
+ type: int
+ default: 4096
+ type:
+ description:
+ - The algorithm used to generate the TLS/SSL private key.
+ - Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
+ C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require
+ cryptography 2.6 or newer. For C(ECC), the minimal cryptography version required depends on the
+ I(curve) option.
+ type: str
+ default: RSA
+ choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
+ curve:
+ description:
+ - Note that not all curves are supported by all versions of C(cryptography).
+ - For maximal interoperability, C(secp384r1) or C(secp256r1) should be used.
+ - We use the curve names as defined in the
+ L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
+ type: str
+ choices:
+ - secp384r1
+ - secp521r1
+ - secp224r1
+ - secp192r1
+ - secp256r1
+ - secp256k1
+ - brainpoolP256r1
+ - brainpoolP384r1
+ - brainpoolP512r1
+ - sect571k1
+ - sect409k1
+ - sect283k1
+ - sect233k1
+ - sect163k1
+ - sect571r1
+ - sect409r1
+ - sect283r1
+ - sect233r1
+ - sect163r2
+ version_added: "2.8"
+ force:
+ description:
+ - Should the key be regenerated even if it already exists.
+ type: bool
+ default: no
+ path:
+ description:
+ - Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
+ type: path
+ required: true
+ passphrase:
+ description:
+ - The passphrase for the private key.
+ type: str
+ version_added: "2.4"
+ cipher:
+ description:
+ - The cipher to encrypt the private key. (Valid values can be found by
+ running `openssl list -cipher-algorithms` or `openssl list-cipher-algorithms`,
+ depending on your OpenSSL version.)
+ - When using the C(cryptography) backend, use C(auto).
+ type: str
+ version_added: "2.4"
+ select_crypto_backend:
+ description:
+ - Determines which crypto backend to use.
+ - The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
+ - If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
+ - If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
+ - Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in Ansible 2.13.
+ From that point on, only the C(cryptography) backend will be available.
+ type: str
+ default: auto
+ choices: [ auto, cryptography, pyopenssl ]
+ version_added: "2.8"
+ format:
+ description:
+ - Determines which format the private key is written in. By default, PKCS1 (traditional OpenSSL format)
+ is used for all keys which support it. Please note that not every key can be exported in any format.
+ - The value C(auto) selects a format based on the key type. The value C(auto_ignore) does the same,
+ but for existing private key files, it will not force regeneration when their format is not the
+ automatically selected one for generation.
+ - Note that if the format of an existing private key does not match, the key is B(regenerated) by default.
+ To change this behavior, use the I(format_mismatch) option.
+ - The I(format) option is only supported by the C(cryptography) backend. The C(pyopenssl) backend will
+ fail if a value different from C(auto_ignore) is used.
+ type: str
+ default: auto_ignore
+ choices: [ pkcs1, pkcs8, raw, auto, auto_ignore ]
+ version_added: "2.10"
+ format_mismatch:
+ description:
+ - Determines behavior of the module if the format of a private key does not match the expected format, but all
+ other parameters are as expected.
+ - If set to C(regenerate) (default), generates a new private key.
+ - If set to C(convert), the key will be converted to the new format instead.
+ - Only supported by the C(cryptography) backend.
+ type: str
+ default: regenerate
+ choices: [ regenerate, convert ]
+ version_added: "2.10"
+ backup:
+ description:
+ - Create a backup file including a timestamp so you can get
+ the original private key back if you overwrote it with a new one by accident.
+ type: bool
+ default: no
+ version_added: "2.8"
+ return_content:
+ description:
+ - If set to C(yes), will return the (current or generated) private key's content as I(privatekey).
+ - Note that especially if the private key is not encrypted, you have to make sure that the returned
+ value is treated appropriately and not accidentally written to logs etc.! Use with care!
+ type: bool
+ default: no
+ version_added: "2.10"
+ regenerate:
+ description:
+ - Configures in which situations the module is allowed to regenerate private keys.
+ The module will always generate a new key if the destination file does not exist.
+ - By default, the key will be regenerated when it doesn't match the module's options,
+ except when the key cannot be read or the passphrase does not match. Please note that
+ this B(changed) for Ansible 2.10. For Ansible 2.9, the behavior was as if C(full_idempotence)
+ is specified.
+ - If set to C(never), the module will fail if the key cannot be read or the passphrase
+ isn't matching, and will never regenerate an existing key.
+ - If set to C(fail), the module will fail if the key does not correspond to the module's
+ options.
+ - If set to C(partial_idempotence), the key will be regenerated if it does not conform to
+ the module's options. The key is B(not) regenerated if it cannot be read (broken file),
+ the key is protected by an unknown passphrase, or when the key is not protected by a
+ passphrase, but a passphrase is specified.
+ - If set to C(full_idempotence), the key will be regenerated if it does not conform to the
+ module's options. This is also the case if the key cannot be read (broken file), the key
+ is protected by an unknown passphrase, or when the key is not protected by a passphrase,
+ but a passphrase is specified. Make sure you have a B(backup) when using this option!
+ - If set to C(always), the module will always regenerate the key. This is equivalent to
+ setting I(force) to C(yes).
+ - Note that if I(format_mismatch) is set to C(convert) and everything matches except the
+ format, the key will always be converted, except if I(regenerate) is set to C(always).
+ type: str
+ choices:
+ - never
+ - fail
+ - partial_idempotence
+ - full_idempotence
+ - always
+ default: full_idempotence
+ version_added: '2.10'
+extends_documentation_fragment:
+- files
+seealso:
+- module: openssl_certificate
+- module: openssl_csr
+- module: openssl_dhparam
+- module: openssl_pkcs12
+- module: openssl_publickey
+'''
+
+EXAMPLES = r'''
+- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
+ openssl_privatekey:
+ path: /etc/ssl/private/ansible.com.pem
+
+- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
+ openssl_privatekey:
+ path: /etc/ssl/private/ansible.com.pem
+ passphrase: ansible
+ cipher: aes256
+
+- name: Generate an OpenSSL private key with a different size (2048 bits)
+ openssl_privatekey:
+ path: /etc/ssl/private/ansible.com.pem
+ size: 2048
+
+- name: Force regenerate an OpenSSL private key if it already exists
+ openssl_privatekey:
+ path: /etc/ssl/private/ansible.com.pem
+ force: yes
+
+- name: Generate an OpenSSL private key with a different algorithm (DSA)
+ openssl_privatekey:
+ path: /etc/ssl/private/ansible.com.pem
+ type: DSA
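+
+# The following two tasks are a minimal sketch of the ECC and format options
+# documented above (all values are illustrative); they require the cryptography backend.
+- name: Generate an OpenSSL private key with elliptic curve cryptography (ECC)
+ openssl_privatekey:
+ path: /etc/ssl/private/ansible.com.pem
+ type: ECC
+ curve: secp256r1
+
+- name: Convert an existing private key to PKCS8 format instead of regenerating it
+ openssl_privatekey:
+ path: /etc/ssl/private/ansible.com.pem
+ format: pkcs8
+ format_mismatch: convert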
+'''
+
+RETURN = r'''
+size:
+ description: Size (in bits) of the TLS/SSL private key.
+ returned: changed or success
+ type: int
+ sample: 4096
+type:
+ description: Algorithm used to generate the TLS/SSL private key.
+ returned: changed or success
+ type: str
+ sample: RSA
+curve:
+ description: Elliptic curve used to generate the TLS/SSL private key.
+ returned: changed or success, and I(type) is C(ECC)
+ type: str
+ sample: secp256r1
+filename:
+ description: Path to the generated TLS/SSL private key file.
+ returned: changed or success
+ type: str
+ sample: /etc/ssl/private/ansible.com.pem
+fingerprint:
+ description:
+ - The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
+ - The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
+ returned: changed or success
+ type: dict
+ sample:
+ md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
+ sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
+ sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
+ sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
+ sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
+ sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
+backup_file:
+ description: Name of backup file created.
+ returned: changed and if I(backup) is C(yes)
+ type: str
+ sample: /path/to/privatekey.pem.2019-03-09@11:22~
+privatekey:
+ description:
+ - The (current or generated) private key's content.
+ - Will be Base64-encoded if the key is in raw format.
+ returned: if I(state) is C(present) and I(return_content) is C(yes)
+ type: str
+ version_added: "2.10"
+'''
+
+import abc
+import base64
+import os
+import traceback
+from distutils.version import LooseVersion
+
+MINIMAL_PYOPENSSL_VERSION = '0.6'
+MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
+
+PYOPENSSL_IMP_ERR = None
+try:
+ import OpenSSL
+ from OpenSSL import crypto
+ PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
+except ImportError:
+ PYOPENSSL_IMP_ERR = traceback.format_exc()
+ PYOPENSSL_FOUND = False
+else:
+ PYOPENSSL_FOUND = True
+
+CRYPTOGRAPHY_IMP_ERR = None
+try:
+ import cryptography
+ import cryptography.exceptions
+ import cryptography.hazmat.backends
+ import cryptography.hazmat.primitives.serialization
+ import cryptography.hazmat.primitives.asymmetric.rsa
+ import cryptography.hazmat.primitives.asymmetric.dsa
+ import cryptography.hazmat.primitives.asymmetric.ec
+ import cryptography.hazmat.primitives.asymmetric.utils
+ CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
+except ImportError:
+ CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
+ CRYPTOGRAPHY_FOUND = False
+else:
+ CRYPTOGRAPHY_FOUND = True
+
+from ansible.module_utils.crypto import (
+ CRYPTOGRAPHY_HAS_X25519,
+ CRYPTOGRAPHY_HAS_X25519_FULL,
+ CRYPTOGRAPHY_HAS_X448,
+ CRYPTOGRAPHY_HAS_ED25519,
+ CRYPTOGRAPHY_HAS_ED448,
+)
+
+from ansible.module_utils import crypto as crypto_utils
+from ansible.module_utils._text import to_native, to_bytes
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class PrivateKeyError(crypto_utils.OpenSSLObjectError):
+ pass
+
+
+class PrivateKeyBase(crypto_utils.OpenSSLObject):
+
+ def __init__(self, module):
+ super(PrivateKeyBase, self).__init__(
+ module.params['path'],
+ module.params['state'],
+ module.params['force'],
+ module.check_mode
+ )
+ self.size = module.params['size']
+ self.passphrase = module.params['passphrase']
+ self.cipher = module.params['cipher']
+ self.privatekey = None
+ self.fingerprint = {}
+ self.format = module.params['format']
+ self.format_mismatch = module.params['format_mismatch']
+ self.privatekey_bytes = None
+ self.return_content = module.params['return_content']
+ self.regenerate = module.params['regenerate']
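+ # regenerate=always is documented as being equivalent to force=yes.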
+ if self.regenerate == 'always':
+ self.force = True
+
+ self.backup = module.params['backup']
+ self.backup_file = None
+
+ if module.params['mode'] is None:
+ module.params['mode'] = '0600'
+
+ @abc.abstractmethod
+ def _generate_private_key(self):
+ """(Re-)Generate private key."""
+ pass
+
+ @abc.abstractmethod
+ def _ensure_private_key_loaded(self):
+ """Make sure that the private key has been loaded."""
+ pass
+
+ @abc.abstractmethod
+ def _get_private_key_data(self):
+ """Return bytes for self.privatekey"""
+ pass
+
+ @abc.abstractmethod
+ def _get_fingerprint(self):
+ pass
+
+ def generate(self, module):
+ """Generate a keypair."""
+
+ if not self.check(module, perms_required=False, ignore_conversion=True) or self.force:
+ # Regenerate
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ self._generate_private_key()
+ privatekey_data = self._get_private_key_data()
+ if self.return_content:
+ self.privatekey_bytes = privatekey_data
+ crypto_utils.write_file(module, privatekey_data, 0o600)
+ self.changed = True
+ elif not self.check(module, perms_required=False, ignore_conversion=False):
+ # Convert
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ self._ensure_private_key_loaded()
+ privatekey_data = self._get_private_key_data()
+ if self.return_content:
+ self.privatekey_bytes = privatekey_data
+ crypto_utils.write_file(module, privatekey_data, 0o600)
+ self.changed = True
+
+ self.fingerprint = self._get_fingerprint()
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ def remove(self, module):
+ if self.backup:
+ self.backup_file = module.backup_local(self.path)
+ super(PrivateKeyBase, self).remove(module)
+
+ @abc.abstractmethod
+ def _check_passphrase(self):
+ pass
+
+ @abc.abstractmethod
+ def _check_size_and_type(self):
+ pass
+
+ @abc.abstractmethod
+ def _check_format(self):
+ pass
+
+ def check(self, module, perms_required=True, ignore_conversion=True):
+ """Ensure the resource is in its desired state."""
+
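+ # ignore_conversion=True corresponds to the generation step (a format mismatch
+ # may trigger regeneration); ignore_conversion=False corresponds to the conversion
+ # step (a format mismatch triggers conversion instead). See generate() above.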
+ state_and_perms = super(PrivateKeyBase, self).check(module, perms_required=False)
+
+ if not state_and_perms:
+ # key does not exist
+ return False
+
+ if not self._check_passphrase():
+ if self.regenerate in ('full_idempotence', 'always'):
+ return False
+ module.fail_json(msg='Unable to read the key. The key is protected with another passphrase, is not protected'
+ ' by a passphrase, or is broken. Will not proceed. To force regeneration, call the module with `regenerate`'
+ ' set to `full_idempotence` or `always`, or with `force=yes`.')
+
+ if self.regenerate != 'never':
+ if not self._check_size_and_type():
+ if self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
+ return False
+ module.fail_json(msg='Key has wrong type and/or size.'
+ ' Will not proceed. To force regeneration, call the module with `regenerate`'
+ ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.')
+
+ if not self._check_format():
+ # During conversion step, convert if format does not match and format_mismatch == 'convert'
+ if not ignore_conversion and self.format_mismatch == 'convert':
+ return False
+ # During generation step, regenerate if format does not match and format_mismatch == 'regenerate'
+ if ignore_conversion and self.format_mismatch == 'regenerate' and self.regenerate != 'never':
+ if not ignore_conversion or self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
+ return False
+ module.fail_json(msg='Key has wrong format.'
+ ' Will not proceed. To force regeneration, call the module with `regenerate`'
+ ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.'
+ ' To convert the key, set `format_mismatch` to `convert`.')
+
+ # check whether permissions are correct (in case that needs to be checked)
+ return not perms_required or super(PrivateKeyBase, self).check(module, perms_required=perms_required)
+
+ def dump(self):
+ """Serialize the object into a dictionary."""
+
+ result = {
+ 'size': self.size,
+ 'filename': self.path,
+ 'changed': self.changed,
+ 'fingerprint': self.fingerprint,
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+ if self.return_content:
+ if self.privatekey_bytes is None:
+ self.privatekey_bytes = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
+ if self.privatekey_bytes:
+ if crypto_utils.identify_private_key_format(self.privatekey_bytes) == 'raw':
+ result['privatekey'] = base64.b64encode(self.privatekey_bytes)
+ else:
+ result['privatekey'] = self.privatekey_bytes.decode('utf-8')
+ else:
+ result['privatekey'] = None
+
+ return result
+
+
+# Implementation using pyOpenSSL
+class PrivateKeyPyOpenSSL(PrivateKeyBase):
+
+ def __init__(self, module):
+ super(PrivateKeyPyOpenSSL, self).__init__(module)
+
+ if module.params['type'] == 'RSA':
+ self.type = crypto.TYPE_RSA
+ elif module.params['type'] == 'DSA':
+ self.type = crypto.TYPE_DSA
+ else:
+ module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")
+
+ if self.format != 'auto_ignore':
+ module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.")
+
+ def _generate_private_key(self):
+ """(Re-)Generate private key."""
+ self.privatekey = crypto.PKey()
+ try:
+ self.privatekey.generate_key(self.type, self.size)
+ except (TypeError, ValueError) as exc:
+ raise PrivateKeyError(exc)
+
+ def _ensure_private_key_loaded(self):
+ """Make sure that the private key has been loaded."""
+ if self.privatekey is None:
+ try:
+ self.privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ raise PrivateKeyError(exc)
+
+ def _get_private_key_data(self):
+ """Return bytes for self.privatekey"""
+ if self.cipher and self.passphrase:
+ return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
+ self.cipher, to_bytes(self.passphrase))
+ else:
+ return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)
+
+ def _get_fingerprint(self):
+ return crypto_utils.get_fingerprint(self.path, self.passphrase)
+
+ def _check_passphrase(self):
+ try:
+ crypto_utils.load_privatekey(self.path, self.passphrase)
+ return True
+ except Exception as dummy:
+ return False
+
+ def _check_size_and_type(self):
+ def _check_size(privatekey):
+ return self.size == privatekey.bits()
+
+ def _check_type(privatekey):
+ return self.type == privatekey.type()
+
+ self._ensure_private_key_loaded()
+ return _check_size(self.privatekey) and _check_type(self.privatekey)
+
+ def _check_format(self):
+ # Not supported by this backend
+ return True
+
+ def dump(self):
+ """Serialize the object into a dictionary."""
+
+ result = super(PrivateKeyPyOpenSSL, self).dump()
+
+ if self.type == crypto.TYPE_RSA:
+ result['type'] = 'RSA'
+ else:
+ result['type'] = 'DSA'
+
+ return result
+
+
+# Implementation using cryptography
+class PrivateKeyCryptography(PrivateKeyBase):
+
+ def _get_ec_class(self, ectype):
+ ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
+ if ecclass is None:
+ self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
+ return ecclass
+
+ def _add_curve(self, name, ectype, deprecated=False):
+ def create(size):
+ ecclass = self._get_ec_class(ectype)
+ return ecclass()
+
+ def verify(privatekey):
+ ecclass = self._get_ec_class(ectype)
+ return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)
+
+ self.curves[name] = {
+ 'create': create,
+ 'verify': verify,
+ 'deprecated': deprecated,
+ }
+
+ def __init__(self, module):
+ super(PrivateKeyCryptography, self).__init__(module)
+
+ self.curves = dict()
+ self._add_curve('secp384r1', 'SECP384R1')
+ self._add_curve('secp521r1', 'SECP521R1')
+ self._add_curve('secp224r1', 'SECP224R1')
+ self._add_curve('secp192r1', 'SECP192R1')
+ self._add_curve('secp256r1', 'SECP256R1')
+ self._add_curve('secp256k1', 'SECP256K1')
+ self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
+ self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
+ self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
+ self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
+ self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
+ self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
+ self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
+ self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
+ self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
+ self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
+ self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
+ self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
+ self._add_curve('sect163r2', 'SECT163R2', deprecated=True)
+
+ self.module = module
+ self.cryptography_backend = cryptography.hazmat.backends.default_backend()
+
+ self.type = module.params['type']
+ self.curve = module.params['curve']
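+ # Which of the modern key types are usable depends on the installed
+ # cryptography version, so fail early with a clear message instead of
+ # erroring deep inside the backend.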
+ if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
+ self.module.fail_json(msg='Your cryptography version does not support X25519')
+ if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
+ self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
+ if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
+ self.module.fail_json(msg='Your cryptography version does not support X448')
+ if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
+ self.module.fail_json(msg='Your cryptography version does not support Ed25519')
+ if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
+ self.module.fail_json(msg='Your cryptography version does not support Ed448')
+
+ def _get_wanted_format(self):
+ if self.format not in ('auto', 'auto_ignore'):
+ return self.format
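+ # X25519/X448/Ed25519/Ed448 keys have no traditional ("pkcs1"-style) PEM
+ # encoding, so PKCS#8 is the default for them.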
+ if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'):
+ return 'pkcs8'
+ else:
+ return 'pkcs1'
+
+ def _generate_private_key(self):
+ """(Re-)Generate private key."""
+ try:
+ if self.type == 'RSA':
+ self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
+ public_exponent=65537, # OpenSSL always uses this
+ key_size=self.size,
+ backend=self.cryptography_backend
+ )
+ if self.type == 'DSA':
+ self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
+ key_size=self.size,
+ backend=self.cryptography_backend
+ )
+ if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
+ self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
+ if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
+ self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
+ if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
+ self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
+ if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
+ self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
+ if self.type == 'ECC' and self.curve in self.curves:
+ if self.curves[self.curve]['deprecated']:
+ self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
+ self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
+ curve=self.curves[self.curve]['create'](self.size),
+ backend=self.cryptography_backend
+ )
+ except cryptography.exceptions.UnsupportedAlgorithm as dummy:
+ self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))
+
+ def _ensure_private_key_loaded(self):
+ """Make sure that the private key has been loaded."""
+ if self.privatekey is None:
+ self.privatekey = self._load_privatekey()
+
+ def _get_private_key_data(self):
+ """Return bytes for self.privatekey"""
+ # Select export format and encoding
+ try:
+ export_format = self._get_wanted_format()
+ export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
+ if export_format == 'pkcs1':
+ # "TraditionalOpenSSL" format is PKCS1
+ export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
+ elif export_format == 'pkcs8':
+ export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
+ elif export_format == 'raw':
+ export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw
+ export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw
+ except AttributeError:
+ self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format))
+
+ # Select key encryption
+ encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
+ if self.cipher and self.passphrase:
+ if self.cipher == 'auto':
+ encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
+ else:
+ self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')
+
+ # Serialize key
+ try:
+ return self.privatekey.private_bytes(
+ encoding=export_encoding,
+ format=export_format,
+ encryption_algorithm=encryption_algorithm
+ )
+ except ValueError as dummy:
+ self.module.fail_json(
+ msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format)
+ )
+ except Exception as dummy:
+ self.module.fail_json(
+ msg='Error while serializing the private key in the required format "{0}"'.format(self.format),
+ exception=traceback.format_exc()
+ )
+
+ def _load_privatekey(self):
+ try:
+ # Read bytes
+ with open(self.path, 'rb') as f:
+ data = f.read()
+ # Interpret bytes depending on format.
+ format = crypto_utils.identify_private_key_format(data)
+ if format == 'raw':
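+ # Raw keys carry no algorithm metadata, so the type is guessed from the
+ # length: X448 keys are 56 bytes, Ed448 keys 57 bytes, and X25519/Ed25519
+ # keys are both 32 bytes (that ambiguity is resolved below).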
+ if len(data) == 56 and CRYPTOGRAPHY_HAS_X448:
+ return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data)
+ if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448:
+ return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data)
+ if len(data) == 32:
+ if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519):
+ return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
+ if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519):
+ return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
+ if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519:
+ try:
+ return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
+ except Exception:
+ return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
+ raise PrivateKeyError('Cannot load raw key')
+ else:
+ return cryptography.hazmat.primitives.serialization.load_pem_private_key(
+ data,
+ None if self.passphrase is None else to_bytes(self.passphrase),
+ backend=self.cryptography_backend
+ )
+ except Exception as e:
+ raise PrivateKeyError(e)
+
+ def _get_fingerprint(self):
+ # Get bytes of public key
+ private_key = self._load_privatekey()
+ public_key = private_key.public_key()
+ public_key_bytes = public_key.public_bytes(
+ cryptography.hazmat.primitives.serialization.Encoding.DER,
+ cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
+ )
+ # Get fingerprints of public_key_bytes
+ return crypto_utils.get_fingerprint_of_bytes(public_key_bytes)
+
+ def _check_passphrase(self):
+ try:
+ with open(self.path, 'rb') as f:
+ data = f.read()
+ format = crypto_utils.identify_private_key_format(data)
+ if format == 'raw':
+ # Raw keys cannot be encrypted. To avoid incompatibilities, we try to
+ # actually load the key (and return False when this fails).
+ self._load_privatekey()
+ # Loading the key succeeded. Only return True when no passphrase was
+ # provided.
+ return self.passphrase is None
+ else:
+ return cryptography.hazmat.primitives.serialization.load_pem_private_key(
+ data,
+ None if self.passphrase is None else to_bytes(self.passphrase),
+ backend=self.cryptography_backend
+ )
+ except Exception as dummy:
+ return False
+
+ def _check_size_and_type(self):
+ self._ensure_private_key_loaded()
+
+ if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
+ return self.type == 'RSA' and self.size == self.privatekey.key_size
+ if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
+ return self.type == 'DSA' and self.size == self.privatekey.key_size
+ if CRYPTOGRAPHY_HAS_X25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
+ return self.type == 'X25519'
+ if CRYPTOGRAPHY_HAS_X448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
+ return self.type == 'X448'
+ if CRYPTOGRAPHY_HAS_ED25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
+ return self.type == 'Ed25519'
+ if CRYPTOGRAPHY_HAS_ED448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
+ return self.type == 'Ed448'
+ if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
+ if self.type != 'ECC':
+ return False
+ if self.curve not in self.curves:
+ return False
+ return self.curves[self.curve]['verify'](self.privatekey)
+
+ return False
+
+ def _check_format(self):
+ if self.format == 'auto_ignore':
+ return True
+ try:
+ with open(self.path, 'rb') as f:
+ content = f.read()
+ format = crypto_utils.identify_private_key_format(content)
+ return format == self._get_wanted_format()
+ except Exception as dummy:
+ return False
+
+ def dump(self):
+ """Serialize the object into a dictionary."""
+ result = super(PrivateKeyCryptography, self).dump()
+ result['type'] = self.type
+ if self.type == 'ECC':
+ result['curve'] = self.curve
+ return result
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ size=dict(type='int', default=4096),
+ type=dict(type='str', default='RSA', choices=[
+ 'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
+ ]),
+ curve=dict(type='str', choices=[
+ 'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
+ 'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
+ 'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
+ 'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
+ ]),
+ force=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ passphrase=dict(type='str', no_log=True),
+ cipher=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
+ format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']),
+ select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
+ return_content=dict(type='bool', default=False),
+ regenerate=dict(
+ type='str',
+ default='full_idempotence',
+ choices=['never', 'fail', 'partial_idempotence', 'full_idempotence', 'always']
+ ),
+ ),
+ supports_check_mode=True,
+ add_file_common_args=True,
+ required_together=[
+ ['cipher', 'passphrase']
+ ],
+ required_if=[
+ ['type', 'ECC', ['curve']],
+ ],
+ )
+
+ base_dir = os.path.dirname(module.params['path']) or '.'
+ if not os.path.isdir(base_dir):
+ module.fail_json(
+ name=base_dir,
+ msg='The directory %s does not exist or the path is not a directory' % base_dir
+ )
+
+ backend = module.params['select_crypto_backend']
+ if backend == 'auto':
+ # Detect what is possible
+ can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
+ can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
+
+ # Decision
+ if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
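+ # A specific (non-'auto') cipher is only honoured by the pyOpenSSL
+ # backend; the cryptography backend accepts cipher=auto only.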
+ # First try pyOpenSSL, then cryptography
+ if can_use_pyopenssl:
+ backend = 'pyopenssl'
+ elif can_use_cryptography:
+ backend = 'cryptography'
+ else:
+ # First try cryptography, then pyOpenSSL
+ if can_use_cryptography:
+ backend = 'cryptography'
+ elif can_use_pyopenssl:
+ backend = 'pyopenssl'
+
+ # Success?
+ if backend == 'auto':
+ module.fail_json(msg=("Can't detect any of the required Python libraries "
+ "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
+ MINIMAL_CRYPTOGRAPHY_VERSION,
+ MINIMAL_PYOPENSSL_VERSION))
+ try:
+ if backend == 'pyopenssl':
+ if not PYOPENSSL_FOUND:
+ module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
+ exception=PYOPENSSL_IMP_ERR)
+ module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated', version='2.13')
+ private_key = PrivateKeyPyOpenSSL(module)
+ elif backend == 'cryptography':
+ if not CRYPTOGRAPHY_FOUND:
+ module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
+ exception=CRYPTOGRAPHY_IMP_ERR)
+ private_key = PrivateKeyCryptography(module)
+
+ if private_key.state == 'present':
+ if module.check_mode:
+ result = private_key.dump()
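+ # Report a pending change if the key fails either check (with and
+ # without ignoring format conversion).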
+ result['changed'] = private_key.force \
+ or not private_key.check(module, ignore_conversion=True) \
+ or not private_key.check(module, ignore_conversion=False)
+ module.exit_json(**result)
+
+ private_key.generate(module)
+ else:
+ if module.check_mode:
+ result = private_key.dump()
+ result['changed'] = os.path.exists(module.params['path'])
+ module.exit_json(**result)
+
+ private_key.remove(module)
+
+ result = private_key.dump()
+ module.exit_json(**result)
+ except crypto_utils.OpenSSLObjectError as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/pids.py b/test/support/integration/plugins/modules/pids.py
new file mode 100644
index 0000000000..4cbf45a969
--- /dev/null
+++ b/test/support/integration/plugins/modules/pids.py
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+# Copyright: (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: pids
+version_added: 2.8
+description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists."
+short_description: "Retrieves process IDs list if the process is running otherwise return empty list"
+author:
+ - Saranya Sridharan (@saranyasridharan)
+requirements:
+ - psutil (Python module)
+options:
+ name:
+ description: The name of the process for which to retrieve PIDs.
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+# Pass the process name
+- name: Getting process IDs of the process
+ pids:
+ name: python
+ register: pids_of_python
+
+- name: Printing the process IDs obtained
+ debug:
+ msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
+'''
+
+RETURN = '''
+pids:
+ description: Process IDs of the given process
+ returned: always
+ type: list
+ sample: [100,200]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ import psutil
+ HAS_PSUTIL = True
+except ImportError:
+ HAS_PSUTIL = False
+
+
+def compare_lower(a, b):
+ if a is None or b is None:
+ # this could just be "return False" but would lead to surprising behavior if both a and b are None
+ return a == b
+
+ return a.lower() == b.lower()
+
+
+def get_pid(name):
+ pids = []
+
+ for proc in psutil.process_iter(attrs=['name', 'cmdline']):
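+ # Match either the process name or the executable in argv[0],
+ # compared case-insensitively.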
+ if compare_lower(proc.info['name'], name) or \
+ proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
+ pids.append(proc.pid)
+
+ return pids
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type="str"),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAS_PSUTIL:
+ module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
+ name = module.params["name"]
+ response = dict(pids=get_pid(name))
+ module.exit_json(**response)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/pkgng.py b/test/support/integration/plugins/modules/pkgng.py
new file mode 100644
index 0000000000..1136347904
--- /dev/null
+++ b/test/support/integration/plugins/modules/pkgng.py
@@ -0,0 +1,406 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+ - Manage binary packages on FreeBSD using C(pkgng), which is available in FreeBSD 9.0 and later.
+version_added: "1.2"
+options:
+ name:
+ description:
+ - Name or list of names of packages to install/remove.
+ required: true
+ state:
+ description:
+ - State of the package.
+ - 'Note: "latest" added in 2.7'
+ choices: [ 'present', 'latest', 'absent' ]
+ required: false
+ default: present
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ type: bool
+ required: false
+ default: no
+ annotation:
+ description:
+ - A comma-separated list of key/value pairs of the form
+ C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ version_added: "1.6"
+ pkgsite:
+ description:
+ - For pkgng versions before 1.1.4, specify packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+ - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ rootdir:
+ description:
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Can not be used together with I(chroot) or I(jail) options.
+ required: false
+ chroot:
+ version_added: "2.1"
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with I(rootdir) or I(jail) options.
+ required: false
+ jail:
+ version_added: "2.4"
+ description:
+ - Pkg will execute in the given jail name or id.
+ - Can not be used together with I(chroot) or I(rootdir) options.
+ autoremove:
+ version_added: "2.2"
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: no
+author: "bleader (@bleader)"
+notes:
+ - When using pkgsite, be aware that packages already present in the cache won't be downloaded again.
+ - When used with a C(loop:), each package is processed individually;
+ it is much more efficient to pass the list directly to the C(name) option.
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ pkgng:
+ name: foo
+ state: present
+
+- name: Annotate package foo and bar
+ pkgng:
+ name: foo,bar
+ annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+ pkgng:
+ name: foo,bar
+ state: absent
+
+# "latest" support added in 2.7
+- name: Upgrade package baz
+ pkgng:
+ name: baz
+ state: latest
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, pkgng_path, name, dir_arg):
+
+ rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
+
+ if rc == 0:
+ return True
+
+ return False
+
+
+def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite):
+
+ # Check to see if a package upgrade is available.
+ # rc = 0, no updates available or package not installed
+ # rc = 1, updates available
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name))
+ else:
+ rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name))
+
+ if rc == 1:
+ return True
+
+ return False
+
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+ rc, out, err = module.run_command("%s -v" % pkgng_path)
+ version = [int(x) for x in re.split(r'[\._]', out)]
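+ # Compare version components pairwise; the while/else falls through to the
+ # else clause only when a differing component is found before either list
+ # is exhausted (break skips it).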
+
+ i = 0
+ new_pkgng = True
+ while compare_version[i] == version[i]:
+ i += 1
+ if i == min(len(compare_version), len(version)):
+ break
+ else:
+ if compare_version[i] > version[i]:
+ new_pkgng = False
+ return not new_pkgng
+
+
+def remove_packages(module, pkgng_path, packages, dir_arg):
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, pkgng_path, package, dir_arg):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
+
+ if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ return (True, "removed %s package(s)" % remove_c)
+
+ return (False, "package(s) already absent")
+
+
+def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state):
+
+ install_c = 0
+
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+ if pkgsite != "":
+ if old_pkgng:
+ pkgsite = "PACKAGESITE=%s" % (pkgsite)
+ else:
+ pkgsite = "-r %s" % (pkgsite)
+
+ # This environment variable skips mid-install prompts,
+ # setting them to their default values.
+ batch_var = 'env BATCH=yes'
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err))
+
+ for package in packages:
+ already_installed = query_package(module, pkgng_path, package, dir_arg)
+ if already_installed and state == "present":
+ continue
+
+ update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+ if not update_available and already_installed and state == "latest":
+ continue
+
+ if not module.check_mode:
+ if already_installed:
+ action = "upgrade"
+ else:
+ action = "install"
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+ else:
+ rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ return (True, "added %s package(s)" % (install_c))
+
+ return (False, "package(s) already %s" % (state))
+
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # Annotation does not exist, add it.
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if _value:
+ rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+ % (pkgng_path, dir_arg, package, tag))
+ if rc != 0:
+ module.fail_json(msg="could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # No such tag
+ module.fail_json(msg="could not change annotation of %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not change annotation annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+ annotate_c = 0
+ # Parse the comma-separated annotation spec into dicts; use a list so it
+ # can be iterated once per package below.
+ annotations = [re.match(r'(?P<operation>[+\-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+ _annotation).groupdict()
+ for _annotation in re.split(r',', annotation)]
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for _annotation in annotations:
+ if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
+
+
+def autoremove_packages(module, pkgng_path, dir_arg):
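+ # Dry-run first (-n) to count removal candidates; the real autoremove (-y)
+ # only runs when something would be removed and we are not in check mode.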
+ rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return False, "no package(s) to autoremove"
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
+
+ return True, "autoremoved %d package(s)" % (autoremove_c)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ name=dict(aliases=["pkg"], required=True, type='list'),
+ cached=dict(default=False, type='bool'),
+ annotation=dict(default="", required=False),
+ pkgsite=dict(default="", required=False),
+ rootdir=dict(default="", required=False, type='path'),
+ chroot=dict(default="", required=False, type='path'),
+ jail=dict(default="", required=False, type='str'),
+ autoremove=dict(default=False, type='bool')),
+ supports_check_mode=True,
+ mutually_exclusive=[["rootdir", "chroot", "jail"]])
+
+ pkgng_path = module.get_bin_path('pkg', True)
+
+ p = module.params
+
+ pkgs = p["name"]
+
+ changed = False
+ msgs = []
+ dir_arg = ""
+
+ if p["rootdir"] != "":
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+ else:
+ dir_arg = "--rootdir %s" % (p["rootdir"])
+
+ if p["chroot"] != "":
+ dir_arg = '--chroot %s' % (p["chroot"])
+
+ if p["jail"] != "":
+ dir_arg = '--jail %s' % (p["jail"])
+
+ if p["state"] in ("present", "latest"):
+ _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg, p["state"])
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ elif p["state"] == "absent":
+ _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["annotation"]:
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_db.py b/test/support/integration/plugins/modules/postgresql_db.py
new file mode 100644
index 0000000000..40858d9974
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_db.py
@@ -0,0 +1,657 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host.
+description:
+ - Add or remove PostgreSQL databases from a remote host.
+version_added: '0.6'
+options:
+ name:
+ description:
+ - Name of the database to add or remove
+ type: str
+ required: true
+ aliases: [ db ]
+ port:
+ description:
+ - Database port to connect to (if needed)
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ owner:
+ description:
+ - Name of the role to set as owner of the database
+ type: str
+ template:
+ description:
+ - Template used to create the database
+ type: str
+ encoding:
+ description:
+ - Encoding of the database
+ type: str
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
+ type: str
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
+ is used as template.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ version_added: '2.8'
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+ Note that some versions of pg_dump (an embedded PostgreSQL utility used by the module)
+ return rc 0 even when errors occur (e.g. the connection is forbidden by pg_hba.conf),
+ so the module returns changed=True although the dump has not actually been done. Please
+ be sure that your version of pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
+ - Supported formats for dump and restore include C(.sql) and C(.tar)
+ type: str
+ choices: [ absent, dump, present, restore ]
+ default: present
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ version_added: '2.4'
+ target_opts:
+ description:
+ - Further arguments for pg_dump or pg_restore.
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ version_added: '2.4'
+ maintenance_db:
+ description:
+ - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ version_added: '2.5'
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ version_added: '2.8'
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to pg_default.
+ type: path
+ version_added: '2.9'
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+ - Cannot be used with dump-file-format-related arguments like C(--format=d).
+ type: str
+ version_added: '2.10'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: postgresql_tablespace
+- module: postgresql_info
+- module: postgresql_ping
+notes:
+- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- postgres
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale # settings.
+ postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to foo. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ postgresql_db:
+ name: foo
+ tablespace: bar
+'''
+
+RETURN = r'''
+executed_commands:
+ description: List of commands the module tried to execute.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '2.10'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+import ansible.module_utils.postgres as pgutils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import SQLParseError, pg_quote_identifier
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def set_owner(cursor, db, owner):
+ query = 'ALTER DATABASE %s OWNER TO "%s"' % (
+ pg_quote_identifier(db, 'database'),
+ owner)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def set_conn_limit(cursor, db, conn_limit):
+ query = "ALTER DATABASE %s CONNECTION LIMIT %s" % (
+ pg_quote_identifier(db, 'database'),
+ conn_limit)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def get_encoding_id(cursor, encoding):
+ query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
+ cursor.execute(query, {'encoding': encoding})
+ return cursor.fetchone()['encoding_id']
+
+
+def get_db_info(cursor, db):
+ query = """
+ SELECT rolname AS owner,
+ pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
+ datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
+ spcname AS tablespace
+ FROM pg_database
+ JOIN pg_roles ON pg_roles.oid = pg_database.datdba
+ JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
+ WHERE datname = %(db)s
+ """
+ cursor.execute(query, {'db': db})
+ return cursor.fetchone()
+
+
+def db_exists(cursor, db):
+ query = "SELECT * FROM pg_database WHERE datname=%(db)s"
+ cursor.execute(query, {'db': db})
+ return cursor.rowcount == 1
+
+
+def db_delete(cursor, db):
+ if db_exists(cursor, db):
+ query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+ else:
+ return False
+
+
+def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
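+ # Identifiers are quoted via pg_quote_identifier(); encoding and locale
+ # values are passed as bound parameters from the dict above.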
+ if not db_exists(cursor, db):
+ query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
+ if owner:
+ query_fragments.append('OWNER "%s"' % owner)
+ if template:
+ query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
+ if encoding:
+ query_fragments.append('ENCODING %(enc)s')
+ if lc_collate:
+ query_fragments.append('LC_COLLATE %(collate)s')
+ if lc_ctype:
+ query_fragments.append('LC_CTYPE %(ctype)s')
+ if tablespace:
+ query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace'))
+ if conn_limit:
+ query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query = ' '.join(query_fragments)
+ executed_commands.append(cursor.mogrify(query, params))
+ cursor.execute(query, params)
+ return True
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ raise NotSupportedError(
+ 'Changing database encoding is not supported. '
+ 'Current encoding: %s' % db_info['encoding']
+ )
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ raise NotSupportedError(
+ 'Changing LC_COLLATE is not supported. '
+ 'Current LC_COLLATE: %s' % db_info['lc_collate']
+ )
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ raise NotSupportedError(
+ 'Changing LC_CTYPE is not supported.'
+ ' Current LC_CTYPE: %s' % db_info['lc_ctype']
+ )
+ else:
+ changed = False
+
+ if owner and owner != db_info['owner']:
+ changed = set_owner(cursor, db, owner)
+
+ if conn_limit and conn_limit != str(db_info['conn_limit']):
+ changed = set_conn_limit(cursor, db, conn_limit)
+
+ if tablespace and tablespace != db_info['tablespace']:
+ changed = set_tablespace(cursor, db, tablespace)
+
+ return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ if not db_exists(cursor, db):
+ return False
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ return False
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ return False
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ return False
+ elif owner and owner != db_info['owner']:
+ return False
+ elif conn_limit and conn_limit != str(db_info['conn_limit']):
+ return False
+ elif tablespace and tablespace != db_info['tablespace']:
+ return False
+ else:
+ return True
+
+
+def db_dump(module, target, target_opts="",
+ db=None,
+ dump_extra_args=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user, db_prefix=False)
+ cmd = module.get_bin_path('pg_dump', True)
+ comp_prog_path = None
+
+ if os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=t')
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=c')
+ if os.path.splitext(target)[-1] == '.gz':
+ if module.get_bin_path('pigz'):
+ comp_prog_path = module.get_bin_path('pigz', True)
+ else:
+ comp_prog_path = module.get_bin_path('gzip', True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzip2', True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xz', True)
+
+ cmd += "".join(flags)
+
+ if dump_extra_args:
+ cmd += " {0} ".format(dump_extra_args)
+
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ # Use a fifo to be notified of an error in pg_dump
+ # Using shell pipe has no way to return the code of the first command
+ # in a portable way.
+ fifo = os.path.join(module.tmpdir, 'pg_fifo')
+ os.mkfifo(fifo)
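+ # The resulting shell line is "<compressor> <fifo >target & <pg_dump cmd> >fifo",
+ # so run_command sees pg_dump's exit status while the compressor runs in
+ # the background.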
+ cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
+ else:
+ cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def db_restore(module, target, target_opts="",
+ db=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user)
+ comp_prog_path = None
+ cmd = module.get_bin_path('psql', True)
+
+ if os.path.splitext(target)[-1] == '.sql':
+ flags.append(' --file={0}'.format(target))
+
+ elif os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=Tar')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=Custom')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.gz':
+ comp_prog_path = module.get_bin_path('zcat', True)
+
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzcat', True)
+
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xzcat', True)
+
+ cmd += "".join(flags)
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ env = os.environ.copy()
+ if password:
+ env = {"PGPASSWORD": password}
+ p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+ (stdout2, stderr2) = p2.communicate()
+ p1.stdout.close()
+ p1.wait()
+ if p1.returncode != 0:
+ stderr1 = p1.stderr.read()
+ return p1.returncode, '', stderr1, 'cmd: ****'
+ else:
+ return p2.returncode, '', stderr2, 'cmd: ****'
+ else:
+ cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+ """
+ returns a list of connection argument strings each prefixed
+ with a space and quoted where necessary to later be combined
+ in a single shell string with `"".join(rv)`
+
+ db_prefix determines if "--dbname" is prefixed to the db argument,
+ since the argument was introduced in 9.3.
+ """
+ flags = []
+ if db:
+ if db_prefix:
+ flags.append(' --dbname={0}'.format(shlex_quote(db)))
+ else:
+ flags.append(' {0}'.format(shlex_quote(db)))
+ if host:
+ flags.append(' --host={0}'.format(host))
+ if port:
+ flags.append(' --port={0}'.format(port))
+ if user:
+ flags.append(' --username={0}'.format(user))
+ return flags
+
+
+def do_with_password(module, cmd, password):
+ env = {}
+ if password:
+ env = {"PGPASSWORD": password}
+ executed_commands.append(cmd)
+ rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+ return rc, stdout, stderr, cmd
+
+
+def set_tablespace(cursor, db, tablespace):
+ query = "ALTER DATABASE %s SET TABLESPACE %s" % (
+ pg_quote_identifier(db, 'database'),
+ pg_quote_identifier(tablespace, 'tablespace'))
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = pgutils.postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', required=True, aliases=['name']),
+ owner=dict(type='str', default=''),
+ template=dict(type='str', default=''),
+ encoding=dict(type='str', default=''),
+ lc_collate=dict(type='str', default=''),
+ lc_ctype=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
+ target=dict(type='path', default=''),
+ target_opts=dict(type='str', default=''),
+ maintenance_db=dict(type='str', default="postgres"),
+ session_role=dict(type='str'),
+ conn_limit=dict(type='str', default=''),
+ tablespace=dict(type='path', default=''),
+ dump_extra_args=dict(type='str', default=None),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ db = module.params["db"]
+ owner = module.params["owner"]
+ template = module.params["template"]
+ encoding = module.params["encoding"]
+ lc_collate = module.params["lc_collate"]
+ lc_ctype = module.params["lc_ctype"]
+ target = module.params["target"]
+ target_opts = module.params["target_opts"]
+ state = module.params["state"]
+ changed = False
+ maintenance_db = module.params['maintenance_db']
+ session_role = module.params["session_role"]
+ conn_limit = module.params['conn_limit']
+ tablespace = module.params['tablespace']
+ dump_extra_args = module.params['dump_extra_args']
+
+ raw_connection = state in ("dump", "restore")
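+ # dump and restore shell out to pg_dump/pg_restore/psql and therefore do
+ # not need a psycopg2 connection (see the notes in DOCUMENTATION).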
+
+ if not raw_connection:
+ pgutils.ensure_required_libs(module)
+
+ # To use default values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+ kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+
+ if is_localhost and module.params["login_unix_socket"] != "":
+ kw["host"] = module.params["login_unix_socket"]
+
+ if target == "":
+ target = "{0}/{1}.sql".format(os.getcwd(), db)
+ target = os.path.expanduser(target)
+
+ if not raw_connection:
+ try:
+ db_connection = psycopg2.connect(database=maintenance_db, **kw)
+
+ # Enable autocommit so we can create databases
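+ # (CREATE DATABASE cannot run inside a transaction block.)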
+ if psycopg2.__version__ >= '2.4.2':
+ db_connection.autocommit = True
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ if session_role:
+ try:
+ cursor.execute('SET ROLE "%s"' % session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = db_exists(cursor, db)
+ elif state == "present":
+ changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+ if state == "absent":
+ try:
+ changed = db_delete(cursor, db)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state in ("dump", "restore"):
+ method = db_dump if state == "dump" else db_restore
+ try:
+ if state == 'dump':
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
+ else:
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
+
+ if rc != 0:
+ module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
+ else:
+ module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
+ executed_commands=executed_commands)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_privs.py b/test/support/integration/plugins/modules/postgresql_privs.py
new file mode 100644
index 0000000000..ba8324dde6
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_privs.py
@@ -0,0 +1,1097 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_privs
+version_added: '1.2'
+short_description: Grant or revoke privileges on PostgreSQL database objects
+description:
+- Grant or revoke privileges on PostgreSQL database objects.
+- This module is basically a wrapper around most of the functionality of
+ PostgreSQL's GRANT and REVOKE statements with detection of changes
+ (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
+options:
+ database:
+ description:
+ - Name of database to connect to.
+ required: yes
+ type: str
+ aliases:
+ - db
+ - login_db
+ state:
+ description:
+ - If C(present), the specified privileges are granted, if C(absent) they are revoked.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ privs:
+ description:
+ - Comma separated list of privileges to grant/revoke.
+ type: str
+ aliases:
+ - priv
+ type:
+ description:
+ - Type of database object to set privileges on.
+ - The C(default_privs) choice is available starting at version 2.7.
+ - The C(foreign_data_wrapper) and C(foreign_server) object types are available from Ansible version '2.8'.
+ - The C(type) choice is available from Ansible version '2.10'.
+ type: str
+ default: table
+ choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+ group, language, table, tablespace, schema, sequence, type ]
+ objs:
+ description:
+ - Comma separated list of database objects to set privileges on.
+ - If I(type) is C(table), C(partition table), C(sequence) or C(function),
+ the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+ database objects of type I(type) in the schema specified via I(schema).
+ (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+ for C(function) and C(partition table) from version 2.8)
+ - If I(type) is C(database), this parameter can be omitted, in which case
+ privileges are set for the database specified via I(database).
+ - 'If I(type) is I(function), colons (":") in object names will be
+ replaced with commas (needed to specify function signatures, see examples)'
+ type: str
+ aliases:
+ - obj
+ schema:
+ description:
+ - Schema that contains the database objects specified via I(objs).
+ - May only be provided if I(type) is C(table), C(sequence), C(function), C(type),
+ or C(default_privs). Defaults to C(public) in these cases.
+ - Note that for built-in types, when I(type=type),
+ I(schema) can be C(pg_catalog) or C(information_schema) respectively.
+ type: str
+ roles:
+ description:
+ - Comma separated list of role (user/group) names to set permissions for.
+ - The special value C(PUBLIC) can be provided instead to set permissions
+ for the implicitly defined PUBLIC group.
+ type: str
+ required: yes
+ aliases:
+ - role
+ fail_on_role:
+ version_added: '2.8'
+ description:
+ - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
+ Otherwise just warn and continue.
+ default: yes
+ type: bool
+ session_role:
+ version_added: '2.8'
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ target_roles:
+ description:
+ - A list of existing role (user/group) names for which to set the
+ default permissions for database objects subsequently created by them.
+ - Parameter I(target_roles) is only available with C(type=default_privs).
+ type: str
+ version_added: '2.8'
+ grant_option:
+ description:
+ - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+ - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
+ - I(grant_option) only has an effect if I(state) is C(present).
+ type: bool
+ aliases:
+ - admin_option
+ host:
+ description:
+ - Database host address. If unspecified, connect via Unix socket.
+ type: str
+ aliases:
+ - login_host
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ aliases:
+ - login_unix_socket
+ login:
+ description:
+ - The username to authenticate with.
+ type: str
+ default: postgres
+ aliases:
+ - login_user
+ password:
+ description:
+ - The password to authenticate with.
+ type: str
+ aliases:
+ - login_password
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ version_added: '2.3'
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ version_added: '2.3'
+ type: str
+ aliases:
+ - ssl_rootcert
+
+notes:
+- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
+ have singular alias names (I(priv), I(obj), I(role)).
+- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
+ C(present) and I(grant_option) to C(no) (see examples).
+- Note that when revoking privileges from a role R, this role may still have
+ access via privileges granted to any role R is a member of including C(PUBLIC).
+- Note that when revoking privileges from a role R, you do so as the user
+ specified via I(login). If R has been granted the same privileges by
+ another user also, R can still access database objects via these privileges.
+- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
+
+seealso:
+- module: postgresql_user
+- module: postgresql_owner
+- module: postgresql_membership
+- name: PostgreSQL privileges
+ description: General information about PostgreSQL privileges.
+ link: https://www.postgresql.org/docs/current/ddl-priv.html
+- name: PostgreSQL GRANT command reference
+ description: Complete reference of the PostgreSQL GRANT command documentation.
+ link: https://www.postgresql.org/docs/current/sql-grant.html
+- name: PostgreSQL REVOKE command reference
+ description: Complete reference of the PostgreSQL REVOKE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-revoke.html
+
+extends_documentation_fragment:
+- postgres
+
+author:
+- Bernhard Weitzhofer (@b6d)
+- Tobias Birkefeld (@tcraxs)
+'''
+
+EXAMPLES = r'''
+# On database "library":
+# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
+# TO librarian, reader WITH GRANT OPTION
+- name: Grant privs to librarian and reader on database library
+ postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: yes
+
+- name: Same as above leveraging default values
+ postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: yes
+
+# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
+# Note that role "reader" will be *granted* INSERT privilege itself if this
+# isn't already the case (since state: present).
+- name: Revoke privs from reader
+ postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: no
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+ postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+ postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+ postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+ postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: yes
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
+
+# Available since version 2.7
+# Objs must be set to ALL_DEFAULT or to a subset of TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific privileges, grant per object class as in the two-step example below
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+ postgresql_privs:
+ db: library
+ objs: ALL_DEFAULT
+ privs: ALL
+ type: default_privs
+ role: librarian
+ grant_option: yes
+
+# Available since version 2.7
+# For specific privileges, set objs to the relevant object classes and privs
+# to privileges valid for those classes (ALL_DEFAULT requires privs=ALL)
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+ postgresql_privs:
+ db: library
+ objs: TABLES,SEQUENCES
+ privs: SELECT
+ type: default_privs
+ role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+ postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since version 2.10
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# Grant SELECT by default on new tables created by librarian to the role
+# reader; target_roles selects the specific creating role.
+- name: ALTER privs
+ postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# Revoke the default SELECT on new tables created by librarian from the role
+# reader; target_roles selects the specific creating role.
+- name: ALTER privs
+ postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.10
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+ version_added: '2.8'
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.database import pg_quote_identifier
+from ansible.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+ 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+ 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+ 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+ 'FUNCTIONS': ('ALL', 'EXECUTE'),
+ 'TYPES': ('ALL', 'USAGE')}
+
+executed_queries = []
+
+
+class Error(Exception):
+ pass
+
+
+def role_exists(module, cursor, rolname):
+ """Check user exists or not"""
+ query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+ try:
+ cursor.execute(query)
+ return cursor.rowcount > 0
+
+ except Exception as e:
+ module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+ return False
+
+
+# functools.partial is unavailable in Python < 2.5, so provide a minimal equivalent
+def partial(f, *args, **kwargs):
+ """Partial function application"""
+
+ def g(*g_args, **g_kwargs):
+ new_kwargs = kwargs.copy()
+ new_kwargs.update(g_kwargs)
+ return f(*(args + g_args), **new_kwargs)
+
+ g.f = f
+ g.args = args
+ g.kwargs = kwargs
+ return g
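+
+# Illustrative usage, as in Connection.manipulate_privs() below: bind the
+# schema first so get_status() takes only the object list, e.g.
+# get_status = partial(self.get_table_acls, 'public')
+# get_status(['books']) # same as self.get_table_acls('public', ['books'])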
+
+
+class Connection(object):
+ """Wrapper around a psycopg2 connection with some convenience methods"""
+
+ def __init__(self, params, module):
+ self.database = params.database
+ self.module = module
+ # To use default values, keyword arguments must be absent, so
+ # check which values are empty and do not include them in the **kw
+ # dictionary
+ params_map = {
+ "host": "host",
+ "login": "user",
+ "password": "password",
+ "port": "port",
+ "database": "database",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ kw = dict((params_map[k], getattr(params, k)) for k in params_map
+ if getattr(params, k) != '' and getattr(params, k) is not None)
+
+ # If a unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+ if is_localhost and params.unix_socket != "":
+ kw["host"] = params.unix_socket
+
+ sslrootcert = params.ca_cert
+ if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
+ raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+ self.connection = psycopg2.connect(**kw)
+ self.cursor = self.connection.cursor()
+
+ def commit(self):
+ self.connection.commit()
+
+ def rollback(self):
+ self.connection.rollback()
+
+ @property
+ def encoding(self):
+ """Connection encoding in Python-compatible form"""
+ return psycopg2.extensions.encodings[self.connection.encoding]
+
+ # Methods for querying database objects
+
+ # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
+ # phrases in GRANT or REVOKE statements, therefore alternative methods are
+ # provided here.
+
+ def schema_exists(self, schema):
+ query = """SELECT count(*)
+ FROM pg_catalog.pg_namespace WHERE nspname = %s"""
+ self.cursor.execute(query, (schema,))
+ return self.cursor.fetchone()[0] > 0
+
+ def get_all_tables_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_sequences_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S'"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_functions_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT p.proname, oidvectortypes(p.proargtypes)
+ FROM pg_catalog.pg_proc p
+ JOIN pg_namespace n ON n.oid = p.pronamespace
+ WHERE nspname = %s"""
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ # Methods for getting access control lists and group membership info
+
+ # To determine whether anything has changed after granting/revoking
+ # privileges, we compare the access control lists of the specified database
+ # objects before and afterwards. Python's list/string comparison should
+ # suffice for change detection, we should not actually have to parse ACLs.
+ # The same should apply to group membership information.
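+
+ # For reference, an ACL entry rendered as text looks roughly like
+ # "reader=arwd/postgres" (grantee=privileges/grantor), so comparing the
+ # fetched lists as plain strings is sufficient.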
+
+ def get_table_acls(self, schema, tables):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, tables))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_sequence_acls(self, schema, sequences):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, sequences))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_function_acls(self, schema, function_signatures):
+ funcnames = [f.split('(', 1)[0] for f in function_signatures]
+ query = """SELECT proacl
+ FROM pg_catalog.pg_proc p
+ JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
+ WHERE nspname = %s AND proname = ANY (%s)
+ ORDER BY proname, proargtypes"""
+ self.cursor.execute(query, (schema, funcnames))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_schema_acls(self, schemas):
+ query = """SELECT nspacl FROM pg_catalog.pg_namespace
+ WHERE nspname = ANY (%s) ORDER BY nspname"""
+ self.cursor.execute(query, (schemas,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_language_acls(self, languages):
+ query = """SELECT lanacl FROM pg_catalog.pg_language
+ WHERE lanname = ANY (%s) ORDER BY lanname"""
+ self.cursor.execute(query, (languages,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_tablespace_acls(self, tablespaces):
+ query = """SELECT spcacl FROM pg_catalog.pg_tablespace
+ WHERE spcname = ANY (%s) ORDER BY spcname"""
+ self.cursor.execute(query, (tablespaces,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_database_acls(self, databases):
+ query = """SELECT datacl FROM pg_catalog.pg_database
+ WHERE datname = ANY (%s) ORDER BY datname"""
+ self.cursor.execute(query, (databases,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_group_memberships(self, groups):
+ query = """SELECT roleid, grantor, member, admin_option
+ FROM pg_catalog.pg_auth_members am
+ JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
+ WHERE r.rolname = ANY(%s)
+ ORDER BY roleid, grantor, member"""
+ self.cursor.execute(query, (groups,))
+ return self.cursor.fetchall()
+
+ def get_default_privs(self, schema, *args):
+ query = """SELECT defaclacl
+ FROM pg_default_acl a
+ JOIN pg_namespace b ON a.defaclnamespace=b.oid
+ WHERE b.nspname = %s;"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_data_wrapper_acls(self, fdws):
+ query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (%s) ORDER BY fdwname"""
+ self.cursor.execute(query, (fdws,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_server_acls(self, fs):
+ query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (%s) ORDER BY srvname"""
+ self.cursor.execute(query, (fs,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_type_acls(self, schema, types):
+ query = """SELECT t.typacl FROM pg_catalog.pg_type t
+ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+ WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
+ self.cursor.execute(query, (schema, types))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ # Manipulating privileges
+
+ def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
+ state, grant_option, schema_qualifier=None, fail_on_role=True):
+ """Manipulate database object privileges.
+
+ :param obj_type: Type of database object to grant/revoke
+ privileges for.
+ :param privs: Either a list of privileges to grant/revoke
+ or None if type is "group".
+ :param objs: List of database objects to grant/revoke
+ privileges for.
+ :param roles: Either a list of role names or "PUBLIC"
+ for the implicitly defined "PUBLIC" group
+ :param target_roles: List of role names to grant/revoke
+ default privileges as.
+ :param state: "present" to grant privileges, "absent" to revoke.
+ :param grant_option: Only for state "present": If True, set
+ grant/admin option. If False, revoke it.
+ If None, don't change grant option.
+ :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
+ "FUNCTION") must be qualified by schema.
+ Ignored for other types.
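+ :param fail_on_role: If True (default), fail when a role does
+ not exist; otherwise warn and skip it.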
+ """
+ # get_status: function to get current status
+ if obj_type == 'table':
+ get_status = partial(self.get_table_acls, schema_qualifier)
+ elif obj_type == 'sequence':
+ get_status = partial(self.get_sequence_acls, schema_qualifier)
+ elif obj_type == 'function':
+ get_status = partial(self.get_function_acls, schema_qualifier)
+ elif obj_type == 'schema':
+ get_status = self.get_schema_acls
+ elif obj_type == 'language':
+ get_status = self.get_language_acls
+ elif obj_type == 'tablespace':
+ get_status = self.get_tablespace_acls
+ elif obj_type == 'database':
+ get_status = self.get_database_acls
+ elif obj_type == 'group':
+ get_status = self.get_group_memberships
+ elif obj_type == 'default_privs':
+ get_status = partial(self.get_default_privs, schema_qualifier)
+ elif obj_type == 'foreign_data_wrapper':
+ get_status = self.get_foreign_data_wrapper_acls
+ elif obj_type == 'foreign_server':
+ get_status = self.get_foreign_server_acls
+ elif obj_type == 'type':
+ get_status = partial(self.get_type_acls, schema_qualifier)
+ else:
+ raise Error('Unsupported database object type "%s".' % obj_type)
+
+ # Return False (nothing has changed) if there are no objs to work on.
+ if not objs:
+ return False
+
+ # obj_ids: quoted db object identifiers (sometimes schema-qualified)
+ if obj_type == 'function':
+ obj_ids = []
+ for obj in objs:
+ try:
+ f, args = obj.split('(', 1)
+ except Exception:
+ raise Error('Illegal function signature: "%s".' % obj)
+ obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
+ elif obj_type in ['table', 'sequence', 'type']:
+ obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
+ else:
+ obj_ids = ['"%s"' % o for o in objs]
+
+ # set_what: SQL-fragment specifying what to set for the target roles:
+ # Either group membership or privileges on objects of a certain type
+ if obj_type == 'group':
+ set_what = ','.join('"%s"' % i for i in obj_ids)
+ elif obj_type == 'default_privs':
+ # We don't want privs to be quoted here
+ set_what = ','.join(privs)
+ else:
+ # function types are already quoted above
+ if obj_type != 'function':
+ obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
+ # Note: obj_type has been checked against a set of string literals
+ # and privs was escaped when it was parsed
+ # Note: Underscores are replaced with spaces to support multi-word obj_type
+ set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
+ ','.join(obj_ids))
+
+ # for_whom: SQL-fragment specifying for whom to set the above
+ if roles == 'PUBLIC':
+ for_whom = 'PUBLIC'
+ else:
+ for_whom = []
+ for r in roles:
+ if not role_exists(self.module, self.cursor, r):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
+
+ else:
+ self.module.warn("Role '%s' does not exist, pass it" % r.strip())
+ else:
+ for_whom.append('"%s"' % r)
+
+ if not for_whom:
+ return False
+
+ for_whom = ','.join(for_whom)
+
+ # as_who:
+ as_who = None
+ if target_roles:
+ as_who = ','.join('"%s"' % r for r in target_roles)
+
+ status_before = get_status(objs)
+
+ query = QueryBuilder(state) \
+ .for_objtype(obj_type) \
+ .with_grant_option(grant_option) \
+ .for_whom(for_whom) \
+ .as_who(as_who) \
+ .for_schema(schema_qualifier) \
+ .set_what(set_what) \
+ .for_objs(objs) \
+ .build()
+
+ executed_queries.append(query)
+ self.cursor.execute(query)
+ status_after = get_status(objs)
+
+ def nonesorted(e):
+ # For python 3+ that can fail trying
+ # to compare NoneType elements by sort method.
+ if e is None:
+ return ''
+ return e
+
+ status_before.sort(key=nonesorted)
+ status_after.sort(key=nonesorted)
+ return status_before != status_after
+
+
+class QueryBuilder(object):
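+ """Incrementally build a GRANT/REVOKE or ALTER DEFAULT PRIVILEGES
+ statement via a fluent interface; Connection.manipulate_privs() above
+ shows the full call chain ending in .build()."""
+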
+ def __init__(self, state):
+ self._grant_option = None
+ self._for_whom = None
+ self._as_who = None
+ self._set_what = None
+ self._obj_type = None
+ self._state = state
+ self._schema = None
+ self._objs = None
+ self.query = []
+
+ def for_objs(self, objs):
+ self._objs = objs
+ return self
+
+ def for_schema(self, schema):
+ self._schema = schema
+ return self
+
+ def with_grant_option(self, option):
+ self._grant_option = option
+ return self
+
+ def for_whom(self, who):
+ self._for_whom = who
+ return self
+
+ def as_who(self, target_roles):
+ self._as_who = target_roles
+ return self
+
+ def set_what(self, what):
+ self._set_what = what
+ return self
+
+ def for_objtype(self, objtype):
+ self._obj_type = objtype
+ return self
+
+ def build(self):
+ if self._state == 'present':
+ self.build_present()
+ else:
+ self.build_absent()
+ return '\n'.join(self.query)
+
+ def add_default_revoke(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+
+ def add_grant_option(self):
+ if self._grant_option:
+ if self._obj_type == 'group':
+ self.query[-1] += ' WITH ADMIN OPTION;'
+ else:
+ self.query[-1] += ' WITH GRANT OPTION;'
+ else:
+ self.query[-1] += ';'
+ if self._obj_type == 'group':
+ self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ elif self._obj_type != 'default_privs':
+ self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+
+ def add_default_priv(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who,
+ self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ self.add_grant_option()
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who,
+ self._schema,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom))
+ self.add_grant_option()
+
+ def build_present(self):
+ if self._obj_type == 'default_privs':
+ self.add_default_revoke()
+ self.add_default_priv()
+ else:
+ self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
+ self.add_grant_option()
+
+ def build_absent(self):
+ if self._obj_type == 'default_privs':
+ self.query = []
+ for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ database=dict(required=True, aliases=['db', 'login_db']),
+ state=dict(default='present', choices=['present', 'absent']),
+ privs=dict(required=False, aliases=['priv']),
+ type=dict(default='table',
+ choices=['table',
+ 'sequence',
+ 'function',
+ 'database',
+ 'schema',
+ 'language',
+ 'tablespace',
+ 'group',
+ 'default_privs',
+ 'foreign_data_wrapper',
+ 'foreign_server',
+ 'type', ]),
+ objs=dict(required=False, aliases=['obj']),
+ schema=dict(required=False),
+ roles=dict(required=True, aliases=['role']),
+ session_role=dict(required=False),
+ target_roles=dict(required=False),
+ grant_option=dict(required=False, type='bool',
+ aliases=['admin_option']),
+ host=dict(default='', aliases=['login_host']),
+ unix_socket=dict(default='', aliases=['login_unix_socket']),
+ login=dict(default='postgres', aliases=['login_user']),
+ password=dict(default='', aliases=['login_password'], no_log=True),
+ fail_on_role=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ fail_on_role = module.params['fail_on_role']
+
+ # Create type object as namespace for module params
+ p = type('Params', (), module.params)
+ # param "schema": default, allowed depends on param "type"
+ if p.type in ['table', 'sequence', 'function', 'type', 'default_privs']:
+ p.schema = p.schema or 'public'
+ elif p.schema:
+ module.fail_json(msg='Argument "schema" is not allowed '
+ 'for type "%s".' % p.type)
+
+ # param "objs": default, required depends on param "type"
+ if p.type == 'database':
+ p.objs = p.objs or p.database
+ elif not p.objs:
+ module.fail_json(msg='Argument "objs" is required '
+ 'for type "%s".' % p.type)
+
+ # param "privs": allowed, required depends on param "type"
+ if p.type == 'group':
+ if p.privs:
+ module.fail_json(msg='Argument "privs" is not allowed '
+ 'for type "group".')
+ elif not p.privs:
+ module.fail_json(msg='Argument "privs" is required '
+ 'for type "%s".' % p.type)
+
+ # Connect to Database
+ if not psycopg2:
+ module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
+ try:
+ conn = Connection(p, module)
+ except psycopg2.Error as e:
+ module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+ except ValueError as e:
+ # We raise this when the psycopg library is too old
+ module.fail_json(msg=to_native(e))
+
+ if p.session_role:
+ try:
+ conn.cursor.execute('SET ROLE "%s"' % p.session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ # privs
+ if p.privs:
+ privs = frozenset(pr.upper() for pr in p.privs.split(','))
+ if not privs.issubset(VALID_PRIVS):
+ module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
+ else:
+ privs = None
+ # objs:
+ if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_tables_in_schema(p.schema)
+ elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_sequences_in_schema(p.schema)
+ elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_functions_in_schema(p.schema)
+ elif p.type == 'default_privs':
+ if p.objs == 'ALL_DEFAULT':
+ objs = frozenset(VALID_DEFAULT_OBJS.keys())
+ else:
+ objs = frozenset(obj.upper() for obj in p.objs.split(','))
+ if not objs.issubset(VALID_DEFAULT_OBJS):
+ module.fail_json(
+ msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
+ # Again, do we have valid privs specified for object type:
+ valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
+ if not valid_objects_for_priv == objs:
+ module.fail_json(
+ msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
+ valid_objects_for_priv, objs))
+ else:
+ objs = p.objs.split(',')
+
+ # function signatures are encoded using ':' to separate args
+ if p.type == 'function':
+ objs = [obj.replace(':', ',') for obj in objs]
+
+ # roles
+ if p.roles == 'PUBLIC':
+ roles = 'PUBLIC'
+ else:
+ roles = p.roles.split(',')
+
+ if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
+ if fail_on_role:
+ module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
+
+ else:
+ module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
+
+ module.exit_json(changed=False)
+
+ # check if target_roles is set with type: default_privs
+ if p.target_roles and p.type != 'default_privs':
+ module.warn('"target_roles" will be ignored. '
+ 'Argument "type: default_privs" is required for usage of "target_roles".')
+
+ # target roles
+ if p.target_roles:
+ target_roles = p.target_roles.split(',')
+ else:
+ target_roles = None
+
+ changed = conn.manipulate_privs(
+ obj_type=p.type,
+ privs=privs,
+ objs=objs,
+ roles=roles,
+ target_roles=target_roles,
+ state=p.state,
+ grant_option=p.grant_option,
+ schema_qualifier=p.schema,
+ fail_on_role=fail_on_role,
+ )
+
+ except Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except psycopg2.Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e))
+
+ if module.check_mode:
+ conn.rollback()
+ else:
+ conn.commit()
+ module.exit_json(changed=changed, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_query.py b/test/support/integration/plugins/modules/postgresql_query.py
new file mode 100644
index 0000000000..18d63e332a
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_query.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'supported_by': 'community',
+ 'status': ['preview']
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- Can run queries from SQL script files.
+- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+version_added: '2.8'
+options:
+ query:
+ description:
+ - SQL query to run. Variables can be escaped with psycopg2 syntax
+ U(http://initd.org/psycopg/docs/usage.html).
+ type: str
+ positional_args:
+ description:
+ - List of values to be passed as positional arguments to the query.
+ When a value is a list, it will be converted to a PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to pass to the query.
+ When a value is a list, it will be converted to a PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path_to_script:
+ description:
+ - Path to SQL script on the remote host.
+ - Returns result of the last query in the script.
+ - Mutually exclusive with I(query).
+ type: path
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ autocommit:
+ description:
+ - Execute in autocommit mode when the query can't be run inside a transaction block
+ (e.g., VACUUM).
+ - Mutually exclusive with I(check_mode).
+ type: bool
+ default: no
+ version_added: '2.9'
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ version_added: '2.10'
+seealso:
+- module: postgresql_db
+author:
+- Felix Archambault (@archf)
+- Andrew Klychkov (@Andersson007)
+- Will Rouesnel (@wrouesnel)
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Simple select query to acme db
+ postgresql_query:
+ db: acme
+ query: SELECT version()
+
+- name: Select query to db acme with positional arguments and non-default credentials
+ postgresql_query:
+ db: acme
+ login_user: django
+ login_password: mysecretpass
+ query: SELECT * FROM acme WHERE id = %s AND story = %s
+ positional_args:
+ - 1
+ - test
+
+- name: Select query to test_db with named_args
+ postgresql_query:
+ db: test_db
+ query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: test
+
+- name: Insert query to test_table in db test_db
+ postgresql_query:
+ db: test_db
+ query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
+
+- name: Run queries from SQL script using UTF-8 client encoding for session
+ postgresql_query:
+ db: test_db
+ path_to_script: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+- name: Example of using autocommit parameter
+ postgresql_query:
+ db: test_db
+ query: VACUUM
+ autocommit: yes
+
+- name: >
+ Insert data into a column of array type using positional_args.
+ Note that we use quotes here, the same as for passing JSON, etc.
+ postgresql_query:
+ query: INSERT INTO test_table (array_column) VALUES (%s)
+ positional_args:
+ - '{1,2,3}'
+
+# Pass list and string vars as positional_args
+- name: Set vars
+ set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+
+- name: Select from test table by passing positional_args as arrays
+ postgresql_query:
+ query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+'''
+
+RETURN = r'''
+query:
+ description: Query that the module attempted to execute.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description: Attribute containing the message returned by the command.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in column:value form representing returned rows.
+ returned: changed
+ type: list
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+rowcount:
+ description: Number of affected rows.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # needed for checking 'no results to fetch' in main();
+ # psycopg2 availability will be checked by connect_to_db() in
+ # ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# Module execution.
+#
+
+def list_to_pg_array(elem):
+ """Convert the passed list to PostgreSQL array
+ represented as a string.
+
+ Args:
+ elem (list): List that needs to be converted.
+
+ Returns:
+ elem (str): String representation of PostgreSQL array.
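+
+ Example:
+ list_to_pg_array([1, 2, 3]) -> '{1, 2, 3}'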
+ """
+ elem = str(elem).strip('[]')
+ elem = '{' + elem + '}'
+ return elem
+
+
+def convert_elements_to_pg_arrays(obj):
+ """Convert list elements of the passed object
+ to PostgreSQL arrays represented as strings.
+
+ Args:
+ obj (dict or list): Object whose elements need to be converted.
+
+ Returns:
+ obj (dict or list): Object with converted elements.
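+
+ Example:
+ convert_elements_to_pg_arrays({'ids': [1, 2]}) -> {'ids': '{1, 2}'}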
+ """
+ if isinstance(obj, dict):
+ for (key, elem) in iteritems(obj):
+ if isinstance(elem, list):
+ obj[key] = list_to_pg_array(elem)
+
+ elif isinstance(obj, list):
+ for i, elem in enumerate(obj):
+ if isinstance(elem, list):
+ obj[i] = list_to_pg_array(elem)
+
+ return obj
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ query=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ positional_args=dict(type='list', elements='raw'),
+ named_args=dict(type='dict'),
+ session_role=dict(type='str'),
+ path_to_script=dict(type='path'),
+ autocommit=dict(type='bool', default=False),
+ encoding=dict(type='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(('positional_args', 'named_args'),),
+ supports_check_mode=True,
+ )
+
+ query = module.params["query"]
+ positional_args = module.params["positional_args"]
+ named_args = module.params["named_args"]
+ path_to_script = module.params["path_to_script"]
+ autocommit = module.params["autocommit"]
+ encoding = module.params["encoding"]
+
+ if autocommit and module.check_mode:
+ module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
+
+ if path_to_script and query:
+ module.fail_json(msg="path_to_script is mutually exclusive with query")
+
+ if positional_args:
+ positional_args = convert_elements_to_pg_arrays(positional_args)
+
+ elif named_args:
+ named_args = convert_elements_to_pg_arrays(named_args)
+
+ if path_to_script:
+ try:
+ with open(path_to_script, 'rb') as f:
+ query = to_native(f.read())
+ except Exception as e:
+ module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ if encoding is not None:
+ db_connection.set_client_encoding(encoding)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Prepare args:
+ if module.params.get("positional_args"):
+ arguments = module.params["positional_args"]
+ elif module.params.get("named_args"):
+ arguments = module.params["named_args"]
+ else:
+ arguments = None
+
+ # Set defaults:
+ changed = False
+
+ # Execute query:
+ try:
+ cursor.execute(query, arguments)
+ except Exception as e:
+ if not autocommit:
+ db_connection.rollback()
+
+ cursor.close()
+ db_connection.close()
+ module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
+
+ statusmessage = cursor.statusmessage
+ rowcount = cursor.rowcount
+
+ try:
+ query_result = [dict(row) for row in cursor.fetchall()]
+ except Psycopg2ProgrammingError as e:
+ if to_native(e) == 'no results to fetch':
+ query_result = {}
+ else:
+ module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+ except Exception as e:
+ module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
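+ # statusmessage looks like 'INSERT 0 1' or 'UPDATE 5' (see the RETURN
+ # sample above); a non-zero trailing row count means rows were affected.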
+ if 'SELECT' not in statusmessage:
+ if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
+ s = statusmessage.split()
+ if len(s) == 3:
+ if s[2] != '0':
+ changed = True
+
+ elif len(s) == 2:
+ if s[1] != '0':
+ changed = True
+
+ else:
+ changed = True
+
+ else:
+ changed = True
+
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ if not autocommit:
+ db_connection.commit()
+
+ kw = dict(
+ changed=changed,
+ query=cursor.query,
+ statusmessage=statusmessage,
+ query_result=query_result,
+ rowcount=rowcount if rowcount >= 0 else 0,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_set.py b/test/support/integration/plugins/modules/postgresql_set.py
new file mode 100644
index 0000000000..cfbdae642b
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_set.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+ - Allows changing a PostgreSQL server configuration parameter.
+ - The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
+ - ALTER SYSTEM changes server configuration parameters across the entire database cluster.
+ - It can be more convenient and safer than the traditional method of manually editing the postgresql.conf file.
+ - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+ which is read in addition to postgresql.conf.
+ - The module allows resetting a parameter to its boot_val (cluster initial value) with I(reset=yes), or removing the parameter
+ string from postgresql.auto.conf and reloading with I(value=default) (for settings with postmaster context a restart is required).
+ - After a change you can see the previous and the new parameter values and other
+ information in the Ansible output by using the returned values with the M(debug) module.
+version_added: '2.8'
+options:
+ name:
+ description:
+ - Name of PostgreSQL server parameter.
+ type: str
+ required: true
+ value:
+ description:
+ - Parameter value to set.
+ - To remove a parameter string from postgresql.auto.conf and
+ reload the server configuration, pass I(value=default).
+ With I(value=default) the task always returns I(changed=true).
+ type: str
+ reset:
+ description:
+ - Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+notes:
+- Supported versions of PostgreSQL are 9.4 and later.
+- Note that changing a setting with 'postmaster' context can return I(changed=true)
+ when nothing actually changes, because the same value may be presented in
+ several different forms, for example 1024MB or 1GB, while the pg_settings
+ system view stores it as 131072 pages of 8kB (131072 * 8kB = 1GB).
+ The final check of the parameter value cannot compare these forms because the server
+ has not been restarted and the value in pg_settings is not updated yet.
+- For some parameters restart of PostgreSQL server is required.
+ See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: postgresql_info
+- name: PostgreSQL server configuration
+ description: General information about PostgreSQL server configuration.
+ link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+ description: Complete reference of the pg_settings view documentation.
+ link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+ description: Complete reference of the ALTER SYSTEM command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+ postgresql_set:
+ name: wal_keep_segments
+ reset: yes
+
+# Set the work_mem parameter to 32MB, and show what has changed and whether a restart is required
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+ postgresql_set:
+ name: work_mem
+ value: 32mb
+ register: set
+
+- debug:
+ msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+ when: set.changed
+# Note that a restart of the PostgreSQL server is required for some parameters.
+# In that situation you see the same parameter in prev_val_pretty and value_pretty, but 'changed=True'
+# (if you passed a value different from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+ postgresql_set:
+ name: log_min_duration_statement
+ value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+ postgresql_set:
+ name: wal_log_hints
+ value: default
+'''
+
+RETURN = r'''
+name:
+ description: Name of PostgreSQL server parameter.
+ returned: always
+ type: str
+ sample: 'shared_buffers'
+restart_required:
+ description: Whether a restart of the PostgreSQL server is required to apply the change.
+ returned: always
+ type: bool
+ sample: true
+prev_val_pretty:
+ description: Parameter value (pretty form) before the change.
+ returned: always
+ type: str
+ sample: '4MB'
+value_pretty:
+ description: Parameter value (pretty form) after the change.
+ returned: always
+ type: str
+ sample: '64MB'
+value:
+ description:
+ - Dictionary that contains the current parameter value (at the time of playbook finish).
+ - Note that some parameters require a restart of the PostgreSQL server for the change to take effect.
+ - Returns the current value in check mode.
+ returned: always
+ type: dict
+ sample: { "value": 67108864, "unit": "b" }
+context:
+ description:
+ - PostgreSQL setting context.
+ returned: always
+ type: str
+ sample: user
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except Exception:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# To allow setting values like 1mb instead of 1MB, etc.:
+POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def param_get(cursor, module, name):
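+ """Fetch the current state of a server parameter from pg_settings.
+
+ Returns a tuple (current value, raw value converted to bytes for
+ kB/MB units, unit, boot_val, context).
+ """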
+ query = ("SELECT name, setting, unit, context, boot_val "
+ "FROM pg_settings WHERE name = %(name)s")
+ try:
+ cursor.execute(query, {'name': name})
+ info = cursor.fetchall()
+ cursor.execute("SHOW %s" % name)
+ val = cursor.fetchone()
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ raw_val = info[0][1]
+ unit = info[0][2]
+ context = info[0][3]
+ boot_val = info[0][4]
+
+ if val[0] == 'True':
+ val[0] = 'on'
+ elif val[0] == 'False':
+ val[0] = 'off'
+
+ if unit == 'kB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024
+
+ unit = 'b'
+
+ elif unit == 'MB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024 * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024 * 1024
+
+ unit = 'b'
+
+ return (val[0], raw_val, unit, boot_val, context)
+
+
+def pretty_to_bytes(pretty_val):
+ # The function returns a value in bytes
+ # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
+ # Otherwise it returns the passed argument.
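+ # Examples: pretty_to_bytes('4MB') -> 4194304,
+ # pretty_to_bytes('8kB') -> 8192, pretty_to_bytes('on') -> 'on'.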
+
+ val_in_bytes = None
+
+ if 'kB' in pretty_val:
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024
+
+ elif 'MB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024
+
+ elif 'GB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024
+
+ elif 'TB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
+
+ elif 'B' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part
+
+ else:
+ return pretty_val
+
+ return val_in_bytes
+
+
+def param_set(cursor, module, name, value, context):
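+ """Run ALTER SYSTEM SET for the parameter and reload the server
+ configuration (reload is skipped for 'postmaster' context, which
+ requires a restart instead)."""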
+ try:
+ if str(value).lower() == 'default':
+ query = "ALTER SYSTEM SET %s = DEFAULT" % name
+ else:
+ query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
+ cursor.execute(query)
+
+ if context != 'postmaster':
+ cursor.execute("SELECT pg_reload_conf()")
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', aliases=['login_db']),
+ value=dict(type='str'),
+ reset=dict(type='bool'),
+ session_role=dict(type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ value = module.params["value"]
+ reset = module.params["reset"]
+
+ # Allow passing values like 1mb instead of 1MB, etc.:
+ if value:
+ for unit in POSSIBLE_SIZE_UNITS:
+ if value[:-2].isdigit() and unit in value[-2:]:
+ value = value.upper()
+
+ if value and reset:
+ module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
+
+ if not value and not reset:
+ module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ kw = {}
+ # Check server version (needs 9.4 or later):
+ ver = db_connection.server_version
+ if ver < PG_REQ_VER:
+ module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
+ kw = dict(
+ changed=False,
+ restart_required=False,
+ value_pretty="",
+ prev_val_pretty="",
+ value={"value": "", "unit": ""},
+ )
+ kw['name'] = name
+ db_connection.close()
+ module.exit_json(**kw)
+
+ # Set default returned values:
+ restart_required = False
+ changed = False
+ kw['name'] = name
+ kw['restart_required'] = False
+
+ # Get info about param state:
+ res = param_get(cursor, module, name)
+ current_value = res[0]
+ raw_val = res[1]
+ unit = res[2]
+ boot_val = res[3]
+ context = res[4]
+
+ if value == 'True':
+ value = 'on'
+ elif value == 'False':
+ value = 'off'
+
+ kw['prev_val_pretty'] = current_value
+ kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
+ kw['context'] = context
+
+ # Do job
+ if context == "internal":
+ module.fail_json(msg="%s: cannot be changed (internal context). See "
+ "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
+
+ if context == "postmaster":
+ restart_required = True
+
+ # If check_mode, just compare and exit:
+ if module.check_mode:
+ if pretty_to_bytes(value) == pretty_to_bytes(current_value):
+ kw['changed'] = False
+
+ else:
+ kw['value_pretty'] = value
+ kw['changed'] = True
+
+ # Anyway returns current raw value in the check_mode:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ kw['restart_required'] = restart_required
+ module.exit_json(**kw)
+
+ # Set param:
+ if value and value != current_value:
+ changed = param_set(cursor, module, name, value, context)
+
+ kw['value_pretty'] = value
+
+ # Reset param:
+ elif reset:
+ if raw_val == boot_val:
+ # nothing to change, exit:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ module.exit_json(**kw)
+
+ changed = param_set(cursor, module, name, boot_val, context)
+
+ if restart_required:
+ module.warn("Restart of PostgreSQL is required for setting %s" % name)
+
+ cursor.close()
+ db_connection.close()
+
+ # Reconnect and recheck current value:
+ if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ res = param_get(cursor, module, name)
+ # f_ means 'final'
+ f_value = res[0]
+ f_raw_val = res[1]
+
+ if raw_val == f_raw_val:
+ changed = False
+
+ else:
+ changed = True
+
+ kw['value_pretty'] = f_value
+ kw['value'] = dict(
+ value=f_raw_val,
+ unit=unit,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ kw['changed'] = changed
+ kw['restart_required'] = restart_required
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_table.py b/test/support/integration/plugins/modules/postgresql_table.py
new file mode 100644
index 0000000000..3bef03b08f
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_table.py
@@ -0,0 +1,601 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows you to create, drop, rename, or truncate a table, and to change some table attributes.
+version_added: '2.8'
+options:
+ table:
+ description:
+ - Table name.
+ required: true
+ aliases:
+ - name
+ type: str
+ state:
+ description:
+ - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tablespace:
+ description:
+ - Set a tablespace for the table.
+ required: false
+ type: str
+ owner:
+ description:
+ - Set a table owner.
+ type: str
+ unlogged:
+ description:
+ - Create an unlogged table.
+ type: bool
+ default: no
+ like:
+ description:
+ - Create a table like another table (with similar DDL).
+ Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ including:
+ description:
+ - Keywords that are used with the I(like) parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS, or ALL.
+ Requires I(like) to be specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ columns:
+ description:
+ - List of column definitions, for example C(id bigserial primary key).
+ type: list
+ elements: str
+ rename:
+ description:
+ - New table name. Mutually exclusive with I(tablespace), I(owner),
+ I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+ type: str
+ truncate:
+ description:
+ - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(rename), and I(storage_params).
+ type: bool
+ default: no
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+ Mutually exclusive with I(rename) and I(truncate).
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of the database to connect to, where the table will be created.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the table (such as views).
+ Used with I(state=absent) only.
+ type: bool
+ default: no
+ version_added: '2.9'
+notes:
+- If you do not pass the db parameter, tables will be created in the database
+ named postgres.
+- PostgreSQL allows creating a table without columns, so the columns parameter is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: postgresql_sequence
+- module: postgresql_idx
+- module: postgresql_info
+- module: postgresql_tablespace
+- module: postgresql_owner
+- module: postgresql_privs
+- module: postgresql_copy
+- name: CREATE TABLE reference
+ description: Complete reference of the CREATE TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+ description: Complete reference of the ALTER TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+ description: Complete reference of the DROP TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+ description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html
+author:
+- Andrei Klychkov (@Andersson007)
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
+ postgresql_table:
+ db: acme
+ name: tbl2
+ like: tbl1
+ owner: testuser
+
+- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
+ postgresql_table:
+ db: acme
+ table: tbl2
+ like: tbl1
+ including: comments, indexes
+ tablespace: ssd
+
+- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
+ postgresql_table:
+ name: test_table
+ columns:
+ - id bigserial primary key
+ - num bigint
+ - stories text
+ tablespace: ssd
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+
+- name: Create an unlogged table in schema acme
+ postgresql_table:
+ name: acme.useless_data
+ columns: waste_id int
+ unlogged: true
+
+- name: Rename table foo to bar
+ postgresql_table:
+ table: foo
+ rename: bar
+
+- name: Rename table foo from schema acme to bar
+ postgresql_table:
+ name: acme.foo
+ rename: bar
+
+- name: Set owner to someuser
+ postgresql_table:
+ name: foo
+ owner: someuser
+
+- name: Change tablespace of foo table to new_tablespace and set owner to new_user
+ postgresql_table:
+ name: foo
+ tablespace: new_tablespace
+ owner: new_user
+
+- name: Truncate table foo
+ postgresql_table:
+ name: foo
+ truncate: yes
+
+- name: Drop table foo from schema acme
+ postgresql_table:
+ name: acme.foo
+ state: absent
+
+- name: Drop table bar cascade
+ postgresql_table:
+ name: bar
+ state: absent
+ cascade: yes
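+
+# An illustrative sketch only: assumes a role named reader exists and that
+# login_user is a member of it (see the session_role option above).
+- name: Create a table while switching to the reader session role
+ postgresql_table:
+ db: acme
+ name: logs
+ columns: id int
+ session_role: reader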
+'''
+
+RETURN = r'''
+table:
+ description: Name of a table.
+ returned: always
+ type: str
+ sample: 'foo'
+state:
+ description: Table state.
+ returned: always
+ type: str
+ sample: 'present'
+owner:
+ description: Table owner.
+ returned: always
+ type: str
+ sample: 'postgres'
+tablespace:
+ description: Tablespace.
+ returned: always
+ type: str
+ sample: 'ssd_tablespace'
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
+storage_params:
+ description: Storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import pg_quote_identifier
+from ansible.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Table(object):
+ def __init__(self, name, module, cursor):
+ self.name = name
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'owner': '',
+ 'tblspace': '',
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_queries = []
+
+ def get_info(self):
+ """Getter to refresh and get table info"""
+ self.__exists_in_db()
+
+ def __exists_in_db(self):
+ """Check table exists and refresh info"""
+ if "." in self.name:
+ schema = self.name.split('.')[-2]
+ tblname = self.name.split('.')[-1]
+ else:
+ schema = 'public'
+ tblname = self.name
+
+ query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
+ "FROM pg_tables AS t "
+ "INNER JOIN pg_class AS c ON c.relname = t.tablename "
+ "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
+ "WHERE t.tablename = %(tblname)s "
+ "AND n.nspname = %(schema)s")
+ res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
+ add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ owner=res[0][0],
+ tblspace=res[0][1] if res[0][1] else '',
+ storage_params=res[0][2] if res[0][2] else [],
+ )
+
+ return True
+ else:
+ self.exists = False
+ return False
+
+ def create(self, columns='', params='', tblspace='',
+ unlogged=False, owner=''):
+ """
+ Create table.
+ If table exists, check passed args (params, tblspace, owner) and,
+ if they're different from current, change them.
+ Arguments:
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ columns - column string (comma separated).
+ """
+ name = pg_quote_identifier(self.name, 'table')
+
+ changed = False
+
+ if self.exists:
+ # self.info['tblspace'] is normalized to '' when the table sits in the
+ # default tablespace, which is what an explicit 'pg_default' means:
+ if tblspace == 'pg_default' and not self.info['tblspace']:
+ pass
+ elif tblspace and self.info['tblspace'] != tblspace:
+ self.set_tblspace(tblspace)
+ changed = True
+
+ if owner and self.info['owner'] != owner:
+ self.set_owner(owner)
+ changed = True
+
+ if params:
+ param_list = [p.strip(' ') for p in params.split(',')]
+
+ new_param = False
+ for p in param_list:
+ if p not in self.info['storage_params']:
+ new_param = True
+
+ if new_param:
+ self.set_stor_params(params)
+ changed = True
+
+ if changed:
+ return True
+ return False
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ if columns:
+ query += " (%s)" % columns
+ else:
+ query += " ()"
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
+
+ if exec_sql(self, query, ddl=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def create_like(self, src_table, including='', tblspace='',
+ unlogged=False, params='', owner=''):
+ """
+ Create table like another table (with similar DDL).
+ Arguments:
+ src_table - source table.
+ including - corresponds to optional INCLUDING expression
+ in CREATE TABLE ... LIKE statement.
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ """
+ changed = False
+
+ name = pg_quote_identifier(self.name, 'table')
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
+
+ if including:
+ including = including.split(',')
+ for i in including:
+ query += " INCLUDING %s" % i
+
+ query += ')'
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
+
+ if exec_sql(self, query, ddl=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def truncate(self):
+ query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
+ return exec_sql(self, query, ddl=True)
+
+ def rename(self, newname):
+ query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(newname, 'table'))
+ return exec_sql(self, query, ddl=True)
+
+ def set_owner(self, username):
+ query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(username, 'role'))
+ return exec_sql(self, query, ddl=True)
+
+ def drop(self, cascade=False):
+ if not self.exists:
+ return False
+
+ query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
+ if cascade:
+ query += " CASCADE"
+ return exec_sql(self, query, ddl=True)
+
+ def set_tblspace(self, tblspace):
+ query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(tblspace, 'database'))
+ return exec_sql(self, query, ddl=True)
+
+ def set_stor_params(self, params):
+ query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
+ return exec_sql(self, query, ddl=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ table=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ db=dict(type='str', default='', aliases=['login_db']),
+ tablespace=dict(type='str'),
+ owner=dict(type='str'),
+ unlogged=dict(type='bool', default=False),
+ like=dict(type='str'),
+ including=dict(type='str'),
+ rename=dict(type='str'),
+ truncate=dict(type='bool', default=False),
+ columns=dict(type='list', elements='str'),
+ storage_params=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ table = module.params["table"]
+ state = module.params["state"]
+ tablespace = module.params["tablespace"]
+ owner = module.params["owner"]
+ unlogged = module.params["unlogged"]
+ like = module.params["like"]
+ including = module.params["including"]
+ newname = module.params["rename"]
+ storage_params = module.params["storage_params"]
+ truncate = module.params["truncate"]
+ columns = module.params["columns"]
+ cascade = module.params["cascade"]
+
+ if state == 'present' and cascade:
+ module.warn("cascade=true is ignored when state=present")
+
+ # Check mutual exclusive parameters:
+ if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
+ module.fail_json(msg="%s: state=absent is mutually exclusive with: "
+ "truncate, rename, columns, tablespace, "
+ "including, like, storage_params, unlogged, owner" % table)
+
+ if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: truncate is mutually exclusive with: "
+ "rename, columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: rename is mutually exclusive with: "
+ "columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if like and columns:
+ module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
+ if including and not like:
+ module.fail_json(msg="%s: including param needs like param specified" % table)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ if columns:
+ columns = ','.join(columns)
+
+ ##############
+ # Do main job:
+ table_obj = Table(table, module, cursor)
+
+ # Set default returned values:
+ changed = False
+ kw = {}
+ kw['table'] = table
+ kw['state'] = ''
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+
+ if state == 'absent':
+ changed = table_obj.drop(cascade=cascade)
+
+ elif truncate:
+ changed = table_obj.truncate()
+
+ elif newname:
+ changed = table_obj.rename(newname)
+ q = table_obj.executed_queries
+ table_obj = Table(newname, module, cursor)
+ table_obj.executed_queries = q
+
+ elif state == 'present' and not like:
+ changed = table_obj.create(columns, storage_params,
+ tablespace, unlogged, owner)
+
+ elif state == 'present' and like:
+ changed = table_obj.create_like(like, including, tablespace,
+ unlogged, storage_params)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ # Refresh table info for RETURN.
+ # Note, if table has been renamed, it gets info by newname:
+ table_obj.get_info()
+ db_connection.commit()
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+ else:
+ # We just change the table state here
+ # to keep other information about the dropped table:
+ kw['state'] = 'absent'
+
+ kw['queries'] = table_obj.executed_queries
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_user.py b/test/support/integration/plugins/modules/postgresql_user.py
new file mode 100644
index 0000000000..10afd0a0d8
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_user.py
@@ -0,0 +1,927 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Add or remove a user (role) from a PostgreSQL server instance
+description:
+- Adds or removes a user (role) from a PostgreSQL server instance
+ ("cluster" in PostgreSQL terminology) and, optionally,
+ grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- The fundamental function of the module is to create, or delete, users from
+ a PostgreSQL instance. Privilege assignment, or removal, is an optional
+ step, which works on one database at a time. This allows the module to
+ be called several times in the same playbook to modify the permissions on
+ different databases, or to grant permissions to already existing users.
+- A user cannot be removed until all of its privileges have been stripped.
+ In such a situation, if the module tries to remove the user it will fail.
+ To prevent this, the fail_on_user option signals the module to try to
+ remove the user, but to keep going if that is not possible; the module
+ will report whether changes happened and, separately, whether the user
+ was removed or not.
+version_added: '0.6'
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ aliases:
+ - user
+ password:
+ description:
+ - Set the user's password; before 1.4 this was required.
+ - Password can be passed unhashed or hashed (MD5-hashed).
+ - An unhashed password will automatically be hashed when saved into the
+ database if the C(encrypted) parameter is set, otherwise it will be saved in
+ plain text format.
+ - When passing a hashed password it must be generated with the format
+ C('str["md5"] + md5[ password + username ]'), resulting in a total of
+ 35 characters. An easy way to do this is C(echo "md5$(echo -n
+ 'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
+ - Note that if the provided password string is already in MD5-hashed
+ format, then it is used as-is, regardless of the C(encrypted) parameter.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and where user's permissions will be granted.
+ type: str
+ aliases:
+ - login_db
+ fail_on_user:
+ description:
+ - If C(yes), fail when user (role) can't be removed. Otherwise just log and continue.
+ default: 'yes'
+ type: bool
+ aliases:
+ - fail_on_role
+ priv:
+ description:
+ - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+ privileges can be defined for database ( allowed options - 'CREATE',
+ 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+ for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+ C(table:SELECT) ). Mixed example of this string:
+ C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+ type: str
+ role_attr_flags:
+ description:
+ - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+ - Note that '[NO]CREATEUSER' is deprecated.
+ - To create a simple role to use as a group, use the C(NOLOGIN) flag.
+ type: str
+ choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
+ '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
+ session_role:
+ version_added: '2.8'
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The user (role) state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Whether the password is stored hashed in the database.
+ - Passwords can be passed already hashed or unhashed, and PostgreSQL
+ ensures the stored password is hashed when C(encrypted) is set.
+ - "Note: PostgreSQL 10 and newer does not support unhashed passwords."
+ - Prior to Ansible 2.6, this was C(no) by default.
+ default: 'yes'
+ type: bool
+ version_added: '1.4'
+ expires:
+ description:
+ - The date at which the user's password is to expire.
+ - If set to C('infinity'), the user's password never expires.
+ - Note that this value should be a valid SQL date and time type.
+ type: str
+ version_added: '1.4'
+ no_password_changes:
+ description:
+ - If C(yes), do not inspect the database for password changes. Effective when
+ C(pg_authid) is not accessible (such as on AWS RDS). Otherwise, make
+ password changes as necessary.
+ default: 'no'
+ type: bool
+ version_added: '2.0'
+ conn_limit:
+ description:
+ - Specifies the user (role) connection limit.
+ type: int
+ version_added: '2.4'
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ version_added: '2.3'
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ version_added: '2.3'
+ groups:
+ description:
+ - The list of groups (roles) that need to be granted to the user.
+ type: list
+ elements: str
+ version_added: '2.9'
+ comment:
+ description:
+ - Add a comment on the user (equal to the COMMENT ON ROLE statement result).
+ type: str
+ version_added: '2.10'
+notes:
+- The module creates a user (role) with login privilege by default.
+ Use NOLOGIN role_attr_flags to change this behaviour.
+- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles).
+ You may not specify password or role_attr_flags when the PUBLIC user is specified.
+seealso:
+- module: postgresql_privs
+- module: postgresql_membership
+- module: postgresql_owner
+- name: PostgreSQL database roles
+ description: Complete reference of the PostgreSQL database roles documentation.
+ link: https://www.postgresql.org/docs/current/user-manag.html
+author:
+- Ansible Core Team
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Connect to acme database, create django user, and grant access to database and products table
+ postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
+ expires: "Jan 31 2020"
+
+- name: Add a comment on django user
+ postgresql_user:
+ db: acme
+ name: django
+ comment: This is a test user
+
+# Connect to default database, create rails user, set its password (MD5-hashed),
+# and grant privilege to create other databases and demote rails from super user status if user exists
+- name: Create rails user, set MD5-hashed password, grant privs
+ postgresql_user:
+ name: rails
+ password: md59543f1d82624df2b31672ec0f7050460
+ role_attr_flags: CREATEDB,NOSUPERUSER
+
+- name: Connect to acme database and remove test user privileges from there
+ postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: no
+
+- name: Connect to test database, remove test user from cluster
+ postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
+
+- name: Connect to acme database and set user's password with no expire date
+ postgresql_user:
+ db: acme
+ name: django
+ password: mysupersecretword
+ priv: "CONNECT/products:ALL"
+ expires: infinity
+
+# Example privileges string format
+# INSERT,UPDATE/table:SELECT/anothertable:ALL
+
+- name: Connect to test database and remove an existing user's password
+ postgresql_user:
+ db: test
+ user: test
+ password: ""
+
+- name: Create user test and grant group user_ro and user_rw to it
+ postgresql_user:
+ name: test
+ groups:
+ - user_ro
+ - user_rw
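+
+# An illustrative sketch only: the user name and connection limit are
+# arbitrary, demonstrating the conn_limit option documented above.
+- name: Create user test_limited allowing at most 10 concurrent connections
+ postgresql_user:
+ name: test_limited
+ conn_limit: 10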
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
+ version_added: '2.8'
+'''
+
+import itertools
+import re
+import traceback
+from hashlib import md5
+
+try:
+ import psycopg2
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import pg_quote_identifier, SQLParseError
+from ansible.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import iteritems
+
+
+FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
+FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
+
+VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
+ database=frozenset(
+ ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
+ )
+
+# map to cope with idiosyncrasies of SUPERUSER and LOGIN
+PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
+ CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
+ REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
+
+executed_queries = []
+
+
+class InvalidFlagsError(Exception):
+ pass
+
+
+class InvalidPrivsError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def user_exists(cursor, user):
+ # The PUBLIC user is a special case that is always there
+ if user == 'PUBLIC':
+ return True
+ query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
+ cursor.execute(query, {'user': user})
+ return cursor.rowcount > 0
+
+
+def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
+ """Create a new database user (role)."""
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ query_password_data = dict(password=password, expires=expires)
+ query = ['CREATE USER "%(user)s"' %
+ {"user": user}]
+ if password is not None and password != '':
+ query.append("WITH %(crypt)s" % {"crypt": encrypted})
+ query.append("PASSWORD %(password)s")
+ if expires is not None:
+ query.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query.append(role_attr_flags)
+ query = ' '.join(query)
+ executed_queries.append(query)
+ cursor.execute(query, query_password_data)
+ return True
+
+
+def user_should_we_change_password(current_role_attrs, user, password, encrypted):
+ """Check if we should change the user's password.
+
+ Compare the proposed password with the existing one, comparing
+ hashes if encrypted. If we cannot access the current password, assume a change is needed.
+ """
+
+ if current_role_attrs is None:
+ # on some databases, e.g. AWS RDS instances, there is no access to
+ # the pg_authid relation to check the pre-existing password, so we
+ # just assume password is different
+ return True
+
+ # Do we actually need to do anything?
+ pwchanging = False
+ if password is not None:
+ # Empty password means that the role shouldn't have a password, which
+ # means we need to check if the current password is None.
+ if password == '':
+ if current_role_attrs['rolpassword'] is not None:
+ pwchanging = True
+ # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
+ # 3: The size of the 'md5' prefix
+ # When the provided password looks like an MD5 hash, the value of
+ # 'encrypted' is ignored.
+ elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
+ if password != current_role_attrs['rolpassword']:
+ pwchanging = True
+ elif encrypted == 'ENCRYPTED':
+ hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
+ if hashed_password != current_role_attrs['rolpassword']:
+ pwchanging = True
+
+ return pwchanging
+
+
+def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
+ """Change user password and/or attributes. Return True if changed, False otherwise."""
+ changed = False
+
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ if user == 'PUBLIC':
+ if password is not None:
+ module.fail_json(msg="cannot change the password for PUBLIC user")
+ elif role_attr_flags != '':
+ module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
+ else:
+ return False
+
+ # Handle passwords.
+ if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
+ # Select password and all flag-like columns in order to verify changes.
+ try:
+ select = "SELECT * FROM pg_authid where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError:
+ current_role_attrs = None
+ db_connection.rollback()
+
+ pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
+
+ if current_role_attrs is None:
+ try:
+ # AWS RDS instances do not allow users to access pg_authid,
+ # so try to get current_role_attrs from the pg_roles table
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes from pg_roles
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError as e:
+ db_connection.rollback()
+ module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
+
+ role_attr_flags_changing = False
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if expires is not None:
+ cursor.execute("SELECT %s::timestamptz;", (expires,))
+ expires_with_tz = cursor.fetchone()[0]
+ expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
+ else:
+ expires_changing = False
+
+ conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
+
+ if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' % {"user": user}]
+ if pwchanging:
+ if password != '':
+ alter.append("WITH %(crypt)s" % {"crypt": encrypted})
+ alter.append("PASSWORD %(password)s")
+ else:
+ alter.append("WITH PASSWORD NULL")
+ alter.append(role_attr_flags)
+ elif role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+ if expires is not None:
+ alter.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+
+ query_password_data = dict(password=password, expires=expires)
+ try:
+ cursor.execute(' '.join(alter), query_password_data)
+ changed = True
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+ except psycopg2.NotSupportedError as e:
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+
+ elif no_password_changes and role_attr_flags != '':
+ # Grab role information from pg_roles instead of pg_authid
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+
+ role_attr_flags_changing = False
+
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if not role_attr_flags_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' %
+ {"user": user}]
+ if role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+
+ try:
+ cursor.execute(' '.join(alter))
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+
+ # Grab new role attributes.
+ cursor.execute(select, {"user": user})
+ new_role_attrs = cursor.fetchone()
+
+ # Detect any differences between current_ and new_role_attrs.
+ changed = current_role_attrs != new_role_attrs
+
+ return changed
+
+
+def user_delete(cursor, user):
+ """Try to remove a user. Returns True if successful otherwise False"""
+ cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
+ try:
+ query = 'DROP USER "%s"' % user
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return False
+
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return True
+
+
+def has_table_privileges(cursor, user, table, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_table_privileges(cursor, user, table)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def get_table_privileges(cursor, user, table):
+ if '.' in table:
+ schema, table = table.split('.', 1)
+ else:
+ schema = 'public'
+ query = ("SELECT privilege_type FROM information_schema.role_table_grants "
+ "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
+ cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
+ return frozenset([x[0] for x in cursor.fetchall()])
+
+
+def grant_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'GRANT %s ON TABLE %s TO "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def get_database_privileges(cursor, user, db):
+ priv_map = {
+ 'C': 'CREATE',
+ 'T': 'TEMPORARY',
+ 'c': 'CONNECT',
+ }
+ query = 'SELECT datacl FROM pg_database WHERE datname = %s'
+ cursor.execute(query, (db,))
+ datacl = cursor.fetchone()[0]
+ if datacl is None:
+ return set()
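+ # datacl is an aclitem array rendered as a string such as
+ # '{owner=CTc/owner,alice=c/owner}'; pick out this user's privilege
+ # letters (mapped to names via priv_map above).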
+ r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
+ if r is None:
+ return set()
+ o = set()
+ for v in r.group(1):
+ o.add(priv_map[v])
+ return normalize_privileges(o, 'database')
+
+
+def has_database_privileges(cursor, user, db, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_database_privileges(cursor, user, db)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def grant_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'GRANT %s ON DATABASE %s TO "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ revoke_funcs = dict(table=revoke_table_privileges,
+ database=revoke_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested to be removed are
+ # currently granted to the user
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[0]:
+ revoke_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def grant_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ grant_funcs = dict(table=grant_table_privileges,
+ database=grant_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested for the user are
+ # currently missing
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[2]:
+ grant_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def parse_role_attrs(cursor, role_attr_flags):
+ """
+ Parse role attributes string for user creation.
+ Format:
+
+ attributes[,attributes,...]
+
+ Where:
+
+ attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
+ [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
+ "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
+ "[NO]BYPASSRLS" ]
+
+ Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
+ Note: "[NO]CREATEUSER" role attribute is deprecated.
+
+ """
+ flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
+
+ valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
+ valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
+
+ if not flags.issubset(valid_flags):
+ raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
+ ' '.join(flags.difference(valid_flags)))
+
+ return ' '.join(flags)
+
+
+def normalize_privileges(privs, type_):
+ new_privs = set(privs)
+ if 'ALL' in new_privs:
+ new_privs.update(VALID_PRIVS[type_])
+ new_privs.remove('ALL')
+ if 'TEMP' in new_privs:
+ new_privs.add('TEMPORARY')
+ new_privs.remove('TEMP')
+
+ return new_privs
+
+
+def parse_privs(privs, db):
+ """
+ Parse privilege string to determine permissions for database db.
+ Format:
+
+ privileges[/privileges/...]
+
+ Where:
+
+ privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
+ TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
+ """
+ if privs is None:
+ return privs
+
+ o_privs = {
+ 'database': {},
+ 'table': {}
+ }
+ for token in privs.split('/'):
+ if ':' not in token:
+ type_ = 'database'
+ name = db
+ priv_set = frozenset(x.strip().upper()
+ for x in token.split(',') if x.strip())
+ else:
+ type_ = 'table'
+ name, privileges = token.split(':', 1)
+ priv_set = frozenset(x.strip().upper()
+ for x in privileges.split(',') if x.strip())
+
+ if not priv_set.issubset(VALID_PRIVS[type_]):
+ raise InvalidPrivsError('Invalid privs specified for %s: %s' %
+ (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
+
+ priv_set = normalize_privileges(priv_set, type_)
+ o_privs[type_][name] = priv_set
+
+ return o_privs
+
+
+def get_valid_flags_by_version(cursor):
+ """
+ Some role attributes were introduced after certain versions. We want to
+ compile a list of valid flags against the current Postgres version.
+ """
+ current_version = cursor.connection.server_version
+
+ return [
+ flag
+ for flag, version_introduced in FLAGS_BY_VERSION.items()
+ if current_version >= version_introduced
+ ]
+
+
+def get_comment(cursor, user):
+ """Get user's comment."""
+ query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(user)s")
+ cursor.execute(query, {'user': user})
+ return cursor.fetchone()[0]
+
+
+def add_comment(cursor, user, comment):
+ """Add comment on user."""
+ if comment != get_comment(cursor, user):
+ query = 'COMMENT ON ROLE "%s" IS ' % user
+ cursor.execute(query + '%(comment)s', {'comment': comment})
+ executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
+ return True
+ else:
+ return False
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ user=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', default=None, no_log=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priv=dict(type='str', default=None),
+ db=dict(type='str', default='', aliases=['login_db']),
+ fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']),
+ role_attr_flags=dict(type='str', default=''),
+ encrypted=dict(type='bool', default='yes'),
+ no_password_changes=dict(type='bool', default='no'),
+ expires=dict(type='str', default=None),
+ conn_limit=dict(type='int', default=None),
+ session_role=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ comment=dict(type='str', default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ user = module.params["user"]
+ password = module.params["password"]
+ state = module.params["state"]
+ fail_on_user = module.params["fail_on_user"]
+ if module.params['db'] == '' and module.params["priv"] is not None:
+ module.fail_json(msg="privileges require a database to be specified")
+ privs = parse_privs(module.params["priv"], module.params["db"])
+ no_password_changes = module.params["no_password_changes"]
+ if module.params["encrypted"]:
+ encrypted = "ENCRYPTED"
+ else:
+ encrypted = "UNENCRYPTED"
+ expires = module.params["expires"]
+ conn_limit = module.params["conn_limit"]
+ role_attr_flags = module.params["role_attr_flags"]
+ groups = module.params["groups"]
+ if groups:
+ groups = [e.strip() for e in groups]
+ comment = module.params["comment"]
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
+ except InvalidFlagsError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ kw = dict(user=user)
+ changed = False
+ user_removed = False
+
+ if state == "present":
+ if user_exists(cursor, user):
+ try:
+ changed = user_alter(db_connection, module, user, password,
+ role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ else:
+ try:
+ changed = user_add(cursor, user, password,
+ role_attr_flags, encrypted, expires, conn_limit)
+ except psycopg2.ProgrammingError as e:
+ module.fail_json(msg="Unable to add user with given requirement "
+ "due to : %s" % to_native(e),
+ exception=traceback.format_exc())
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ try:
+ changed = grant_privileges(cursor, user, privs) or changed
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if groups:
+ target_roles = []
+ target_roles.append(user)
+ pg_membership = PgMembership(module, cursor, groups, target_roles)
+ changed = pg_membership.grant() or changed
+ executed_queries.extend(pg_membership.executed_queries)
+
+ if comment is not None:
+ try:
+ changed = add_comment(cursor, user, comment) or changed
+ except Exception as e:
+ module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ else:
+ if user_exists(cursor, user):
+ if module.check_mode:
+ changed = True
+ kw['user_removed'] = True
+ else:
+ try:
+ changed = revoke_privileges(cursor, user, privs)
+ user_removed = user_delete(cursor, user)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ changed = changed or user_removed
+ if fail_on_user and not user_removed:
+ msg = "Unable to remove user"
+ module.fail_json(msg=msg)
+ kw['user_removed'] = user_removed
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/rabbitmq_plugin.py b/test/support/integration/plugins/modules/rabbitmq_plugin.py
new file mode 100644
index 0000000000..301bbfe282
--- /dev/null
+++ b/test/support/integration/plugins/modules/rabbitmq_plugin.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Chatham Financial <oss@chathamfinancial.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_plugin
+short_description: Manage RabbitMQ plugins
+description:
+ - This module can be used to enable or disable RabbitMQ plugins.
+version_added: "1.1"
+author:
+ - Chris Hoffman (@chrishoffman)
+options:
+ names:
+ description:
+ - Comma-separated list of plugin names. A single plugin name is also accepted.
+ required: true
+ aliases: [name]
+ new_only:
+ description:
+ - Only enable missing plugins.
+ - Does not disable plugins that are not in the names list.
+ type: bool
+ default: "no"
+ state:
+ description:
+ - Specify if plugins are to be enabled or disabled.
+ default: enabled
+ choices: [enabled, disabled]
+ prefix:
+ description:
+ - Specify a custom install prefix for RabbitMQ.
+ version_added: "1.3"
+'''
+
+EXAMPLES = '''
+- name: Enables the rabbitmq_management plugin
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ state: enabled
+
+- name: Enable multiple rabbitmq plugins
+ rabbitmq_plugin:
+ names: rabbitmq_management,rabbitmq_management_visualiser
+ state: enabled
+
+- name: Disable plugin
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ state: disabled
+
+- name: Enable every plugin in the list without disabling existing plugins
+ rabbitmq_plugin:
+ names: rabbitmq_management,rabbitmq_management_visualiser,rabbitmq_shovel,rabbitmq_shovel_management
+ state: enabled
+ new_only: 'yes'
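+
+# An illustrative sketch only: the prefix path is hypothetical; see the
+# prefix option documented above.
+- name: Enable the management plugin for RabbitMQ installed under a custom prefix
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ prefix: /opt/rabbitmq
+ state: enabled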
+'''
+
+RETURN = '''
+enabled:
+ description: List of plugins enabled during the task run.
+ returned: always
+ type: list
+ sample: ["rabbitmq_management"]
+disabled:
+ description: List of plugins disabled during the task run.
+ returned: always
+ type: list
+ sample: ["rabbitmq_management"]
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class RabbitMqPlugins(object):
+
+ def __init__(self, module):
+ self.module = module
+ bin_path = ''
+ if module.params['prefix']:
+ if os.path.isdir(os.path.join(module.params['prefix'], 'bin')):
+ bin_path = os.path.join(module.params['prefix'], 'bin')
+ elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')):
+ bin_path = os.path.join(module.params['prefix'], 'sbin')
+ else:
+ # No such path exists.
+ module.fail_json(msg="No binary folder in prefix %s" % module.params['prefix'])
+
+ self._rabbitmq_plugins = os.path.join(bin_path, "rabbitmq-plugins")
+ else:
+ self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
+
+ def _exec(self, args, run_in_check_mode=False):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = [self._rabbitmq_plugins]
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return out.splitlines()
+ return list()
+
+ def get_all(self):
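+ # 'rabbitmq-plugins list -E -m' lists only explicitly enabled plugins,
+ # minimally formatted (one plugin name per line).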
+ list_output = self._exec(['list', '-E', '-m'], True)
+ plugins = []
+ for plugin in list_output:
+ if not plugin:
+ break
+ plugins.append(plugin)
+
+ return plugins
+
+ def enable(self, name):
+ self._exec(['enable', name])
+
+ def disable(self, name):
+ self._exec(['disable', name])
+
+
+def main():
+ arg_spec = dict(
+ names=dict(required=True, aliases=['name']),
+ new_only=dict(default='no', type='bool'),
+ state=dict(default='enabled', choices=['enabled', 'disabled']),
+ prefix=dict(required=False, default=None)
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ result = dict()
+ names = module.params['names'].split(',')
+ new_only = module.params['new_only']
+ state = module.params['state']
+
+ rabbitmq_plugins = RabbitMqPlugins(module)
+ enabled_plugins = rabbitmq_plugins.get_all()
+
+ enabled = []
+ disabled = []
+ if state == 'enabled':
+ if not new_only:
+ for plugin in enabled_plugins:
+ if " " in plugin:
+ continue
+ if plugin not in names:
+ rabbitmq_plugins.disable(plugin)
+ disabled.append(plugin)
+
+ for name in names:
+ if name not in enabled_plugins:
+ rabbitmq_plugins.enable(name)
+ enabled.append(name)
+ else:
+ for plugin in enabled_plugins:
+ if plugin in names:
+ rabbitmq_plugins.disable(plugin)
+ disabled.append(plugin)
+
+ result['changed'] = len(enabled) > 0 or len(disabled) > 0
+ result['enabled'] = enabled
+ result['disabled'] = disabled
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/rabbitmq_queue.py b/test/support/integration/plugins/modules/rabbitmq_queue.py
new file mode 100644
index 0000000000..567ec8130d
--- /dev/null
+++ b/test/support/integration/plugins/modules/rabbitmq_queue.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_queue
+author: Manuel Sousa (@manuel-sousa)
+version_added: "2.0"
+
+short_description: Manage RabbitMQ queues
+description:
+ - This module uses the RabbitMQ REST API to create/delete queues.
+requirements: [ "requests >= 1.0.0" ]
+options:
+ name:
+ description:
+ - Name of the queue
+ required: true
+ state:
+ description:
+ - Whether the queue should be present or absent
+ choices: [ "present", "absent" ]
+ default: present
+ durable:
+ description:
+ - Whether the queue is durable or not
+ type: bool
+ default: 'yes'
+ auto_delete:
+ description:
+ - If the queue should delete itself after all consumers have unsubscribed from it
+ type: bool
+ default: 'no'
+ message_ttl:
+ description:
+ - How long a message can live in the queue before it is discarded (milliseconds)
+ default: forever
+ auto_expires:
+ description:
+ - How long a queue can be unused before it is automatically deleted (milliseconds)
+ default: forever
+ max_length:
+ description:
+ - How many messages the queue can contain before it starts rejecting new ones
+ default: no limit
+ dead_letter_exchange:
+ description:
+ - Optional name of an exchange to which messages will be republished if they
+ are rejected or expire.
+ dead_letter_routing_key:
+ description:
+ - Optional replacement routing key to use when a message is dead-lettered.
+ - Original routing key will be used if unset
+ max_priority:
+ description:
+ - Maximum number of priority levels for the queue to support.
+ - If not set, the queue will not support message priorities.
+ - Larger numbers indicate higher priority.
+ version_added: "2.4"
+ arguments:
+ description:
+ - Extra arguments for the queue. If defined, this argument is a key/value dictionary
+ default: {}
+extends_documentation_fragment:
+ - rabbitmq
+'''
+
+EXAMPLES = '''
+# Create a queue
+- rabbitmq_queue:
+ name: myQueue
+
+# Create a queue on remote host
+- rabbitmq_queue:
+ name: myRemoteQueue
+ login_user: user
+ login_password: secret
+ login_host: remote.example.org
+'''
+
+import json
+import traceback
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib import parse as urllib_parse
+from ansible.module_utils.rabbitmq import rabbitmq_argument_spec
+
+
+def main():
+
+ argument_spec = rabbitmq_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ name=dict(required=True, type='str'),
+ durable=dict(default=True, type='bool'),
+ auto_delete=dict(default=False, type='bool'),
+ message_ttl=dict(default=None, type='int'),
+ auto_expires=dict(default=None, type='int'),
+ max_length=dict(default=None, type='int'),
+ dead_letter_exchange=dict(default=None, type='str'),
+ dead_letter_routing_key=dict(default=None, type='str'),
+ arguments=dict(default=dict(), type='dict'),
+ max_priority=dict(default=None, type='int')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ url = "%s://%s:%s/api/queues/%s/%s" % (
+ module.params['login_protocol'],
+ module.params['login_host'],
+ module.params['login_port'],
+ urllib_parse.quote(module.params['vhost'], ''),
+ module.params['name']
+ )
+
+ if not HAS_REQUESTS:
+ module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR)
+
+ result = dict(changed=False, name=module.params['name'])
+
+ # Check if queue already exists
+ r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']),
+ verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
+
+ if r.status_code == 200:
+ queue_exists = True
+ response = r.json()
+ elif r.status_code == 404:
+ queue_exists = False
+ response = r.text
+ else:
+ module.fail_json(
+ msg="Invalid response from RESTAPI when trying to check if queue exists",
+ details=r.text
+ )
+
+ if module.params['state'] == 'present':
+ change_required = not queue_exists
+ else:
+ change_required = queue_exists
+
+ # Check if attributes change on existing queue
+ if not change_required and r.status_code == 200 and module.params['state'] == 'present':
+ if not (
+ response['durable'] == module.params['durable'] and
+ response['auto_delete'] == module.params['auto_delete'] and
+ (
+ ('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
+ ('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
+ ) and
+ (
+ ('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
+ ('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
+ ) and
+ (
+ ('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
+ ('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
+ ) and
+ (
+ ('x-dead-letter-exchange' in response['arguments'] and
+ response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
+ ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
+ ) and
+ (
+ ('x-dead-letter-routing-key' in response['arguments'] and
+ response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
+ ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
+ ) and
+ (
+ ('x-max-priority' in response['arguments'] and
+ response['arguments']['x-max-priority'] == module.params['max_priority']) or
+ ('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
+ )
+ ):
+ module.fail_json(
+ msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
+ )
+
+ # Copy parameters to arguments as used by RabbitMQ
+ for k, v in {
+ 'message_ttl': 'x-message-ttl',
+ 'auto_expires': 'x-expires',
+ 'max_length': 'x-max-length',
+ 'dead_letter_exchange': 'x-dead-letter-exchange',
+ 'dead_letter_routing_key': 'x-dead-letter-routing-key',
+ 'max_priority': 'x-max-priority'
+ }.items():
+ if module.params[k] is not None:
+ module.params['arguments'][v] = module.params[k]
+
+ # Exit if check_mode
+ if module.check_mode:
+ result['changed'] = change_required
+ result['details'] = response
+ result['arguments'] = module.params['arguments']
+ module.exit_json(**result)
+
+ # Do changes
+ if change_required:
+ if module.params['state'] == 'present':
+ r = requests.put(
+ url,
+ auth=(module.params['login_user'], module.params['login_password']),
+ headers={"content-type": "application/json"},
+ data=json.dumps({
+ "durable": module.params['durable'],
+ "auto_delete": module.params['auto_delete'],
+ "arguments": module.params['arguments']
+ }),
+ verify=module.params['ca_cert'],
+ cert=(module.params['client_cert'], module.params['client_key'])
+ )
+ elif module.params['state'] == 'absent':
+ r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']),
+ verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
+
+ # RabbitMQ 3.6.7 changed this response code from 204 to 201
+ if r.status_code == 204 or r.status_code == 201:
+ result['changed'] = True
+ module.exit_json(**result)
+ else:
+ module.fail_json(
+ msg="Error creating queue",
+ status=r.status_code,
+ details=r.text
+ )
+
+ else:
+ module.exit_json(
+ changed=False,
+ name=module.params['name']
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/selinux.py b/test/support/integration/plugins/modules/selinux.py
new file mode 100644
index 0000000000..775c87104b
--- /dev/null
+++ b/test/support/integration/plugins/modules/selinux.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Derek Carter<goozbach@friocorte.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'
+}
+
+DOCUMENTATION = r'''
+---
+module: selinux
+short_description: Change policy and state of SELinux
+description:
+ - Configures the SELinux mode and policy.
+ - A reboot may be required after usage.
+ - Ansible will not issue this reboot but will let you know when it is required.
+version_added: "0.7"
+options:
+ policy:
+ description:
+ - The name of the SELinux policy to use (e.g. C(targeted)). Required if state is not C(disabled).
+ state:
+ description:
+ - The SELinux mode.
+ required: true
+ choices: [ disabled, enforcing, permissive ]
+ configfile:
+ description:
+ - The path to the SELinux configuration file, if non-standard.
+ default: /etc/selinux/config
+ aliases: [ conf, file ]
+requirements: [ libselinux-python ]
+author:
+- Derek Carter (@goozbach) <goozbach@friocorte.com>
+'''
+
+EXAMPLES = r'''
+- name: Enable SELinux
+ selinux:
+ policy: targeted
+ state: enforcing
+
+- name: Put SELinux in permissive mode, logging actions that would be blocked.
+ selinux:
+ policy: targeted
+ state: permissive
+
+- name: Disable SELinux
+ selinux:
+ state: disabled
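+
+# An illustrative sketch only: the configfile path is hypothetical, showing
+# the configfile option documented above.
+- name: Enable SELinux enforcing mode using a non-standard configuration file
+ selinux:
+ policy: targeted
+ state: enforcing
+ configfile: /mnt/sysroot/etc/selinux/config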
+'''
+
+RETURN = r'''
+msg:
+ description: Messages that describe changes that were made.
+ returned: always
+ type: str
+ sample: Config SELinux state changed from 'disabled' to 'permissive'
+configfile:
+ description: Path to SELinux configuration file.
+ returned: always
+ type: str
+ sample: /etc/selinux/config
+policy:
+ description: Name of the SELinux policy.
+ returned: always
+ type: str
+ sample: targeted
+state:
+ description: SELinux mode.
+ returned: always
+ type: str
+ sample: enforcing
+reboot_required:
+  description: Whether or not a reboot is required for the changes to take effect.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+import os
+import re
+import tempfile
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAS_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAS_SELINUX = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.facts.utils import get_file_lines
+
+
+# getter subroutines
+def get_config_state(configfile):
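+    # e.g. the line 'SELINUX=enforcing' in /etc/selinux/config yields 'enforcing'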
+ lines = get_file_lines(configfile, strip=False)
+
+ for line in lines:
+ stateline = re.match(r'^SELINUX=.*$', line)
+ if stateline:
+ return line.split('=')[1].strip()
+
+
+def get_config_policy(configfile):
+ lines = get_file_lines(configfile, strip=False)
+
+ for line in lines:
+ stateline = re.match(r'^SELINUXTYPE=.*$', line)
+ if stateline:
+ return line.split('=')[1].strip()
+
+
+# setter subroutines
+def set_config_state(module, state, configfile):
+ # SELINUX=permissive
+ # edit config file with state value
+ stateline = 'SELINUX=%s' % state
+ lines = get_file_lines(configfile, strip=False)
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+
+ with open(tmpfile, "w") as write_file:
+ for line in lines:
+ write_file.write(re.sub(r'^SELINUX=.*', stateline, line) + '\n')
+
+ module.atomic_move(tmpfile, configfile)
+
+
+def set_state(module, state):
+ if state == 'enforcing':
+ selinux.security_setenforce(1)
+ elif state == 'permissive':
+ selinux.security_setenforce(0)
+ elif state == 'disabled':
+ pass
+ else:
+ msg = 'trying to set invalid runtime state %s' % state
+ module.fail_json(msg=msg)
+
+
+def set_config_policy(module, policy, configfile):
+ if not os.path.exists('/etc/selinux/%s/policy' % policy):
+ module.fail_json(msg='Policy %s does not exist in /etc/selinux/' % policy)
+
+ # edit config file with state value
+ # SELINUXTYPE=targeted
+ policyline = 'SELINUXTYPE=%s' % policy
+ lines = get_file_lines(configfile, strip=False)
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+
+ with open(tmpfile, "w") as write_file:
+ for line in lines:
+ write_file.write(re.sub(r'^SELINUXTYPE=.*', policyline, line) + '\n')
+
+ module.atomic_move(tmpfile, configfile)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ policy=dict(type='str'),
+            state=dict(type='str', required=True, choices=['enforcing', 'permissive', 'disabled']),
+ configfile=dict(type='str', default='/etc/selinux/config', aliases=['conf', 'file']),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_SELINUX:
+ module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR)
+
+ # global vars
+ changed = False
+ msgs = []
+ configfile = module.params['configfile']
+ policy = module.params['policy']
+ state = module.params['state']
+ runtime_enabled = selinux.is_selinux_enabled()
+ runtime_policy = selinux.selinux_getpolicytype()[1]
+ runtime_state = 'disabled'
+ reboot_required = False
+
+ if runtime_enabled:
+ # enabled means 'enforcing' or 'permissive'
+ if selinux.security_getenforce():
+ runtime_state = 'enforcing'
+ else:
+ runtime_state = 'permissive'
+
+ if not os.path.isfile(configfile):
+ module.fail_json(msg="Unable to find file {0}".format(configfile),
+ details="Please install SELinux-policy package, "
+ "if this package is not installed previously.")
+
+ config_policy = get_config_policy(configfile)
+ config_state = get_config_state(configfile)
+
+ # check to see if policy is set if state is not 'disabled'
+ if state != 'disabled':
+ if not policy:
+ module.fail_json(msg="Policy is required if state is not 'disabled'")
+ else:
+ if not policy:
+ policy = config_policy
+
+ # check changed values and run changes
+ if policy != runtime_policy:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ # cannot change runtime policy
+ msgs.append("Running SELinux policy changed from '%s' to '%s'" % (runtime_policy, policy))
+ changed = True
+
+ if policy != config_policy:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ set_config_policy(module, policy, configfile)
+ msgs.append("SELinux policy configuration in '%s' changed from '%s' to '%s'" % (configfile, config_policy, policy))
+ changed = True
+
+ if state != runtime_state:
+ if runtime_enabled:
+ if state == 'disabled':
+ if runtime_state != 'permissive':
+ # Temporarily set state to permissive
+ if not module.check_mode:
+ set_state(module, 'permissive')
+ module.warn("SELinux state temporarily changed from '%s' to 'permissive'. State change will take effect next reboot." % (runtime_state))
+ changed = True
+ else:
+ module.warn('SELinux state change will take effect next reboot')
+ reboot_required = True
+ else:
+ if not module.check_mode:
+ set_state(module, state)
+ msgs.append("SELinux state changed from '%s' to '%s'" % (runtime_state, state))
+
+                # Report the change; set_state() above is skipped in check
+                # mode, but the runtime state change would still occur.
+                changed = True
+ else:
+ module.warn("Reboot is required to set SELinux state to '%s'" % state)
+ reboot_required = True
+
+ if state != config_state:
+ if not module.check_mode:
+ set_config_state(module, state, configfile)
+ msgs.append("Config SELinux state changed from '%s' to '%s'" % (config_state, state))
+ changed = True
+
+ module.exit_json(changed=changed, msg=', '.join(msgs), configfile=configfile, policy=policy, state=state, reboot_required=reboot_required)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/selogin.py b/test/support/integration/plugins/modules/selogin.py
new file mode 100644
index 0000000000..6429ef36e1
--- /dev/null
+++ b/test/support/integration/plugins/modules/selogin.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+
+# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
+# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: selogin
+short_description: Manages Linux user to SELinux user mapping
+description:
+  - Manages the mapping between Linux users and SELinux users.
+version_added: "2.8"
+options:
+ login:
+ description:
+      - A Linux user, or a Linux group if it begins with C(%).
+ required: true
+ seuser:
+ description:
+ - SELinux user name
+ required: true
+ selevel:
+ aliases: [ serange ]
+ description:
+      - MLS/MCS security range (MLS/MCS systems only). The SELinux range for the SELinux login mapping; defaults to the SELinux user record range.
+ default: s0
+ state:
+ description:
+ - Desired mapping value.
+ required: true
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ default: yes
+ ignore_selinux_state:
+ description:
+      - Run independently of the SELinux runtime state.
+ type: bool
+ default: false
+notes:
+   - The changes are persistent across reboots.
+   - Not tested on any Debian-based systems.
+requirements: [ 'libselinux', 'policycoreutils' ]
+author:
+- Dan Keder (@dankeder)
+- Petr Lautrbach (@bachradsusi)
+- James Cassell (@jamescassell)
+'''
+
+EXAMPLES = '''
+# Modify the default user on the system to the guest_u user
+- selogin:
+ login: __default__
+ seuser: guest_u
+ state: present
+
+# Assign gijoe user on an MLS machine a range and to the staff_u user
+- selogin:
+ login: gijoe
+ seuser: staff_u
+ serange: SystemLow-Secret
+ state: present
+
+# Assign all users in the engineering group to the staff_u user
+- selogin:
+ login: '%engineering'
+ seuser: staff_u
+ state: present
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
+ """ Add linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+    :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
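+        # get_all() maps each login to a record whose first two fields are
+        # (seuser, serange), e.g. '__default__' -> ('unconfined_u', 's0-s0:c0.c1023', ...)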
+ # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
+ # for local_login in all_logins:
+ if login not in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.add(login, seuser, serange)
+ else:
+ if all_logins[login][0] != seuser or all_logins[login][1] != serange:
+ change = True
+ if not module.check_mode:
+ selogin.modify(login, seuser, serange)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_login_del(module, login, seuser, do_reload, sestore=''):
+ """ Delete linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+    :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
+ if login in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.delete(login)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ login=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange'], default='s0'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ required_if=[
+ ["state", "present", ["seuser"]]
+ ],
+ supports_check_mode=True
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ login = module.params['login']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'login': login,
+ 'seuser': seuser,
+ 'serange': serange,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
+ elif state == 'absent':
+ result['changed'] = semanage_login_del(module, login, seuser, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/synchronize.py b/test/support/integration/plugins/modules/synchronize.py
new file mode 100644
index 0000000000..e4c520b7cf
--- /dev/null
+++ b/test/support/integration/plugins/modules/synchronize.py
@@ -0,0 +1,618 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012-2013, Timothy Appnel <tim@appnel.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: synchronize
+version_added: "1.4"
+short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy
+description:
+ - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy.
+ - It is run and originates on the local host where Ansible is being run.
+ - Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of
+ boilerplate options and host facts.
+ - This module is not intended to provide access to the full power of rsync, but does make the most common
+ invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case.
+options:
+ src:
+ description:
+ - Path on the source host that will be synchronized to the destination.
+ - The path can be absolute or relative.
+ type: str
+ required: true
+ dest:
+ description:
+ - Path on the destination host that will be synchronized from the source.
+ - The path can be absolute or relative.
+ type: str
+ required: true
+ dest_port:
+ description:
+ - Port number for ssh on the destination host.
+ - Prior to Ansible 2.0, the ansible_ssh_port inventory var took precedence over this value.
+ - This parameter defaults to the value of C(ansible_ssh_port) or C(ansible_port),
+ the C(remote_port) config setting or the value from ssh client configuration
+ if none of the former have been set.
+ type: int
+ version_added: "1.5"
+ mode:
+ description:
+ - Specify the direction of the synchronization.
+ - In push mode the localhost or delegate is the source.
+ - In pull mode the remote host in context is the source.
+ type: str
+ choices: [ pull, push ]
+ default: push
+ archive:
+ description:
+ - Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D.
+ type: bool
+ default: yes
+ checksum:
+ description:
+      - Skip based on checksum, rather than mod-time & size. Note that the C(archive) option is still enabled by default; the C(checksum) option will
+        not disable it.
+ type: bool
+ default: no
+ version_added: "1.6"
+ compress:
+ description:
+ - Compress file data during the transfer.
+ - In most cases, leave this enabled unless it causes problems.
+ type: bool
+ default: yes
+ version_added: "1.7"
+ existing_only:
+ description:
+ - Skip creating new files on receiver.
+ type: bool
+ default: no
+ version_added: "1.5"
+ delete:
+ description:
+ - Delete files in C(dest) that don't exist (after transfer, not before) in the C(src) path.
+ - This option requires C(recursive=yes).
+ - This option ignores excluded files and behaves like the rsync opt --delete-excluded.
+ type: bool
+ default: no
+ dirs:
+ description:
+ - Transfer directories without recursing.
+ type: bool
+ default: no
+ recursive:
+ description:
+ - Recurse into directories.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ links:
+ description:
+ - Copy symlinks as symlinks.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ copy_links:
+ description:
+      - Copy the item that symlinks point to (the referent), rather than the symlink itself.
+ type: bool
+ default: no
+ perms:
+ description:
+ - Preserve permissions.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ times:
+ description:
+ - Preserve modification times.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ owner:
+ description:
+ - Preserve owner (super user only).
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ group:
+ description:
+ - Preserve group.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ rsync_path:
+ description:
+ - Specify the rsync command to run on the remote host. See C(--rsync-path) on the rsync man page.
+      - To specify the rsync command to run on the local host, set the task var C(ansible_rsync_path).
+ type: str
+ rsync_timeout:
+ description:
+ - Specify a C(--timeout) for the rsync command in seconds.
+ type: int
+ default: 0
+ set_remote_user:
+ description:
+ - Put user@ for the remote paths.
+ - If you have a custom ssh config to define the remote user for a host
+ that does not match the inventory user, you should set this parameter to C(no).
+ type: bool
+ default: yes
+ use_ssh_args:
+ description:
+ - Use the ssh_args specified in ansible.cfg.
+ type: bool
+ default: no
+ version_added: "2.0"
+ rsync_opts:
+ description:
+ - Specify additional rsync options by passing in an array.
+      - Note that an empty string in C(rsync_opts) will end up transferring the current working directory.
+ type: list
+ default:
+ version_added: "1.6"
+ partial:
+ description:
+ - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster.
+ type: bool
+ default: no
+ version_added: "2.0"
+ verify_host:
+ description:
+ - Verify destination host key.
+ type: bool
+ default: no
+ version_added: "2.0"
+ private_key:
+ description:
+ - Specify the private key to use for SSH-based rsync connections (e.g. C(~/.ssh/id_rsa)).
+ type: path
+ version_added: "1.6"
+ link_dest:
+ description:
+ - Add a destination to hard link against during the rsync.
+ type: list
+ default:
+ version_added: "2.5"
+notes:
+ - rsync must be installed on both the local and remote host.
+ - For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host
+ `synchronize is connecting to`.
+ - The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one
+ remote machine.
+ - >
+ The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a
+ delegate_to host when delegate_to is used).
+ - The user and permissions for the synchronize `dest` are those of the `remote_user` on the destination host or the `become_user` if `become=yes` is active.
+ - In Ansible 2.0 a bug in the synchronize module made become occur on the "local host". This was fixed in Ansible 2.0.1.
+ - Currently, synchronize is limited to elevating permissions via passwordless sudo. This is because rsync itself is connecting to the remote machine
+ and rsync doesn't give us a way to pass sudo credentials in.
+ - Currently there are only a few connection types which support synchronize (ssh, paramiko, local, and docker) because a sync strategy has been
+ determined for those connection types. Note that the connection for these must not need a password as rsync itself is making the connection and
+ rsync does not provide us a way to pass a password to the connection.
+ - Expect that dest=~/x will be ~<remote_user>/x even if using sudo.
+ - Inspect the verbose output to validate the destination user/host/path are what was expected.
+ - To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory.
+  - The rsync daemon must be up and running with the correct permissions when using the rsync protocol in the source or destination path.
+ - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process
+ encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly.
+  - C(link_dest) is subject to the same limitations as the underlying rsync daemon. Hard links are only preserved if the relative subtrees
+ of the source and destination are the same. Attempts to hardlink into a directory that is a subdirectory of the source will be prevented.
+seealso:
+- module: copy
+- module: win_robocopy
+author:
+- Timothy Appnel (@tima)
+'''
+
+EXAMPLES = '''
+- name: Synchronization of src on the control machine to dest on the remote hosts
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+
+- name: Synchronization using rsync protocol (push)
+ synchronize:
+ src: some/relative/path/
+ dest: rsync://somehost.com/path/
+
+- name: Synchronization using rsync protocol (pull)
+ synchronize:
+ mode: pull
+ src: rsync://somehost.com/path/
+ dest: /some/absolute/path/
+
+- name: Synchronization using rsync protocol on delegate host (push)
+ synchronize:
+ src: /some/absolute/path/
+ dest: rsync://somehost.com/path/
+ delegate_to: delegate.host
+
+- name: Synchronization using rsync protocol on delegate host (pull)
+ synchronize:
+ mode: pull
+ src: rsync://somehost.com/path/
+ dest: /some/absolute/path/
+ delegate_to: delegate.host
+
+- name: Synchronization without any --archive options enabled
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ archive: no
+
+- name: Synchronization with --archive options enabled except for --recursive
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ recursive: no
+
+- name: Synchronization with --archive options enabled except for --times, with --checksum option enabled
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ checksum: yes
+ times: no
+
+- name: Synchronization without --archive options enabled except use --links
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ archive: no
+ links: yes
+
+- name: Synchronization of two paths both on the control machine
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ delegate_to: localhost
+
+- name: Synchronization of src on the inventory host to the dest on the localhost in pull mode
+ synchronize:
+ mode: pull
+ src: some/relative/path
+ dest: /some/absolute/path
+
+- name: Synchronization of src on delegate host to dest on the current inventory host.
+ synchronize:
+ src: /first/absolute/path
+ dest: /second/absolute/path
+ delegate_to: delegate.host
+
+- name: Synchronize two directories on one remote host.
+ synchronize:
+ src: /first/absolute/path
+ dest: /second/absolute/path
+ delegate_to: "{{ inventory_hostname }}"
+
+- name: Synchronize and delete files in dest on the remote host that are not found in src of localhost.
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ delete: yes
+ recursive: yes
+
+# This specific command is granted su privileges on the destination
+- name: Synchronize using an alternate rsync command
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ rsync_path: su -c rsync
+
+# Example .rsync-filter file in the source directory
+# - var # exclude any path whose last part is 'var'
+# - /var # exclude any path starting with 'var' starting at the source directory
+# + /var/conf # include /var/conf even though it was previously excluded
+
+- name: Synchronize passing in extra rsync options
+ synchronize:
+ src: /tmp/helloworld
+ dest: /var/www/helloworld
+ rsync_opts:
+ - "--no-motd"
+ - "--exclude=.git"
+
+# Hardlink files if they didn't change
+- name: Use hardlinks when synchronizing filesystems
+ synchronize:
+ src: /tmp/path_a/foo.txt
+ dest: /tmp/path_b/foo.txt
+ link_dest: /tmp/path_a/
+
+# Specify the rsync binary to use on remote host and on local host
+- hosts: groupofhosts
+ vars:
+ ansible_rsync_path: /usr/gnu/bin/rsync
+
+ tasks:
+ - name: copy /tmp/localpath/ to remote location /tmp/remotepath
+ synchronize:
+ src: /tmp/localpath/
+ dest: /tmp/remotepath
+ rsync_path: /usr/gnu/bin/rsync
+'''
+
+
+import os
+import errno
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six.moves import shlex_quote
+
+
+client_addr = None
+
+
+def substitute_controller(path):
+ global client_addr
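+    # SSH_CLIENT/SSH_CONNECTION look like '192.0.2.1 50000 22'; the first
+    # field is the controller's address as seen from this host.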
+ if not client_addr:
+ ssh_env_string = os.environ.get('SSH_CLIENT', None)
+ try:
+ client_addr, _ = ssh_env_string.split(None, 1)
+ except AttributeError:
+ ssh_env_string = os.environ.get('SSH_CONNECTION', None)
+ try:
+ client_addr, _ = ssh_env_string.split(None, 1)
+ except AttributeError:
+ pass
+ if not client_addr:
+ raise ValueError
+
+ if path.startswith('localhost:'):
+ path = path.replace('localhost', client_addr, 1)
+ return path
+
+
+def is_rsh_needed(source, dest):
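+    # e.g. 'user@host:/path' needs a remote shell (ssh), while
+    # 'rsync://host/module/path' talks to an rsync daemon directly.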
+ if source.startswith('rsync://') or dest.startswith('rsync://'):
+ return False
+ if ':' in source or ':' in dest:
+ return True
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='str', required=True),
+ dest=dict(type='str', required=True),
+ dest_port=dict(type='int'),
+ delete=dict(type='bool', default=False),
+ private_key=dict(type='path'),
+ rsync_path=dict(type='str'),
+ _local_rsync_path=dict(type='path', default='rsync'),
+ _local_rsync_password=dict(type='str', no_log=True),
+ _substitute_controller=dict(type='bool', default=False),
+ archive=dict(type='bool', default=True),
+ checksum=dict(type='bool', default=False),
+ compress=dict(type='bool', default=True),
+ existing_only=dict(type='bool', default=False),
+ dirs=dict(type='bool', default=False),
+ recursive=dict(type='bool'),
+ links=dict(type='bool'),
+ copy_links=dict(type='bool', default=False),
+ perms=dict(type='bool'),
+ times=dict(type='bool'),
+ owner=dict(type='bool'),
+ group=dict(type='bool'),
+ set_remote_user=dict(type='bool', default=True),
+ rsync_timeout=dict(type='int', default=0),
+ rsync_opts=dict(type='list', default=[]),
+ ssh_args=dict(type='str'),
+ partial=dict(type='bool', default=False),
+ verify_host=dict(type='bool', default=False),
+ mode=dict(type='str', default='push', choices=['pull', 'push']),
+ link_dest=dict(type='list')
+ ),
+ supports_check_mode=True,
+ )
+
+ if module.params['_substitute_controller']:
+ try:
+ source = substitute_controller(module.params['src'])
+ dest = substitute_controller(module.params['dest'])
+ except ValueError:
+ module.fail_json(msg='Could not determine controller hostname for rsync to send to')
+ else:
+ source = module.params['src']
+ dest = module.params['dest']
+ dest_port = module.params['dest_port']
+ delete = module.params['delete']
+ private_key = module.params['private_key']
+ rsync_path = module.params['rsync_path']
+ rsync = module.params.get('_local_rsync_path', 'rsync')
+ rsync_password = module.params.get('_local_rsync_password')
+    rsync_timeout = module.params.get('rsync_timeout', 0)
+ archive = module.params['archive']
+ checksum = module.params['checksum']
+ compress = module.params['compress']
+ existing_only = module.params['existing_only']
+ dirs = module.params['dirs']
+ partial = module.params['partial']
+ # the default of these params depends on the value of archive
+ recursive = module.params['recursive']
+ links = module.params['links']
+ copy_links = module.params['copy_links']
+ perms = module.params['perms']
+ times = module.params['times']
+ owner = module.params['owner']
+ group = module.params['group']
+ rsync_opts = module.params['rsync_opts']
+ ssh_args = module.params['ssh_args']
+ verify_host = module.params['verify_host']
+ link_dest = module.params['link_dest']
+
+ if '/' not in rsync:
+ rsync = module.get_bin_path(rsync, required=True)
+
+ cmd = [rsync, '--delay-updates', '-F']
+ _sshpass_pipe = None
+ if rsync_password:
+ try:
+ module.run_command(["sshpass"])
+ except OSError:
+ module.fail_json(
+ msg="to use rsync connection with passwords, you must install the sshpass program"
+ )
+ _sshpass_pipe = os.pipe()
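+        # sshpass -d<fd> reads the password from the given file descriptor;
+        # the pipe's write end is filled just before rsync runs (see below).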
+ cmd = ['sshpass', '-d' + to_native(_sshpass_pipe[0], errors='surrogate_or_strict')] + cmd
+ if compress:
+ cmd.append('--compress')
+ if rsync_timeout:
+ cmd.append('--timeout=%s' % rsync_timeout)
+ if module.check_mode:
+ cmd.append('--dry-run')
+ if delete:
+ cmd.append('--delete-after')
+ if existing_only:
+ cmd.append('--existing')
+ if checksum:
+ cmd.append('--checksum')
+ if copy_links:
+ cmd.append('--copy-links')
+ if archive:
+ cmd.append('--archive')
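+        # --archive implies -rlptgoD; the --no-* flags below selectively
+        # turn individual parts of it back off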
+ if recursive is False:
+ cmd.append('--no-recursive')
+ if links is False:
+ cmd.append('--no-links')
+ if perms is False:
+ cmd.append('--no-perms')
+ if times is False:
+ cmd.append('--no-times')
+ if owner is False:
+ cmd.append('--no-owner')
+ if group is False:
+ cmd.append('--no-group')
+ else:
+ if recursive is True:
+ cmd.append('--recursive')
+ if links is True:
+ cmd.append('--links')
+ if perms is True:
+ cmd.append('--perms')
+ if times is True:
+ cmd.append('--times')
+ if owner is True:
+ cmd.append('--owner')
+ if group is True:
+ cmd.append('--group')
+ if dirs:
+ cmd.append('--dirs')
+
+ if source.startswith('rsync://') and dest.startswith('rsync://'):
+ module.fail_json(msg='either src or dest must be a localhost', rc=1)
+
+ if is_rsh_needed(source, dest):
+
+ # https://github.com/ansible/ansible/issues/15907
+ has_rsh = False
+ for rsync_opt in rsync_opts:
+ if '--rsh' in rsync_opt:
+ has_rsh = True
+ break
+
+ # if the user has not supplied an --rsh option go ahead and add ours
+ if not has_rsh:
+ ssh_cmd = [module.get_bin_path('ssh', required=True), '-S', 'none']
+ if private_key is not None:
+ ssh_cmd.extend(['-i', private_key])
+ # If the user specified a port value
+ # Note: The action plugin takes care of setting this to a port from
+ # inventory if the user didn't specify an explicit dest_port
+ if dest_port is not None:
+ ssh_cmd.extend(['-o', 'Port=%s' % dest_port])
+ if not verify_host:
+ ssh_cmd.extend(['-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null'])
+ ssh_cmd_str = ' '.join(shlex_quote(arg) for arg in ssh_cmd)
+ if ssh_args:
+ ssh_cmd_str += ' %s' % ssh_args
+ cmd.append('--rsh=%s' % ssh_cmd_str)
+
+ if rsync_path:
+ cmd.append('--rsync-path=%s' % rsync_path)
+
+ if rsync_opts:
+ if '' in rsync_opts:
+ module.warn('The empty string is present in rsync_opts which will cause rsync to'
+ ' transfer the current working directory. If this is intended, use "."'
+ ' instead to get rid of this warning. If this is unintended, check for'
+ ' problems in your playbook leading to empty string in rsync_opts.')
+ cmd.extend(rsync_opts)
+
+ if partial:
+ cmd.append('--partial')
+
+ if link_dest:
+ cmd.append('-H')
+ # verbose required because rsync does not believe that adding a
+ # hardlink is actually a change
+ cmd.append('-vv')
+ for x in link_dest:
+ link_path = os.path.abspath(os.path.expanduser(x))
+ destination_path = os.path.abspath(os.path.dirname(dest))
+ if destination_path.find(link_path) == 0:
+ module.fail_json(msg='Hardlinking into a subdirectory of the source would cause recursion. %s and %s' % (destination_path, dest))
+ cmd.append('--link-dest=%s' % link_path)
+
+ changed_marker = '<<CHANGED>>'
+ cmd.append('--out-format=' + changed_marker + '%i %n%L')
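+    # rsync emits one '<<CHANGED>>%i %n' line per file it touches; %i is the
+    # itemized-changes string, where a leading '.' means the file content is unchanged.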
+
+ # expand the paths
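+    # ('@' marks a user@host: remote path, whose '~' must be expanded on the remote side)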
+ if '@' not in source:
+ source = os.path.expanduser(source)
+ if '@' not in dest:
+ dest = os.path.expanduser(dest)
+
+ cmd.append(source)
+ cmd.append(dest)
+ cmdstr = ' '.join(cmd)
+
+ # If we are using password authentication, write the password into the pipe
+ if rsync_password:
+ def _write_password_to_pipe(proc):
+ os.close(_sshpass_pipe[0])
+ try:
+ os.write(_sshpass_pipe[1], to_bytes(rsync_password) + b'\n')
+ except OSError as exc:
+ # Ignore broken pipe errors if the sshpass process has exited.
+ if exc.errno != errno.EPIPE or proc.poll() is None:
+ raise
+
+ (rc, out, err) = module.run_command(
+ cmd, pass_fds=_sshpass_pipe,
+ before_communicate_callback=_write_password_to_pipe)
+ else:
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc:
+ return module.fail_json(msg=err, rc=rc, cmd=cmdstr)
+
+ if link_dest:
+ # a leading period indicates no change
+ changed = (changed_marker + '.') not in out
+ else:
+ changed = changed_marker in out
+
+ out_clean = out.replace(changed_marker, '')
+ out_lines = out_clean.split('\n')
+ while '' in out_lines:
+ out_lines.remove('')
+ if module._diff:
+ diff = {'prepared': out_clean}
+ return module.exit_json(changed=changed, msg=out_clean,
+ rc=rc, cmd=cmdstr, stdout_lines=out_lines,
+ diff=diff)
+
+ return module.exit_json(changed=changed, msg=out_clean,
+ rc=rc, cmd=cmdstr, stdout_lines=out_lines)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/timezone.py b/test/support/integration/plugins/modules/timezone.py
new file mode 100644
index 0000000000..d6fc1c76e7
--- /dev/null
+++ b/test/support/integration/plugins/modules/timezone.py
@@ -0,0 +1,909 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Shinichi TAMURA (@tmshn)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+    - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up NTP, use the M(service) module.
+ - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
+ - Several different tools are used depending on the OS/Distribution involved.
+      On Linux it uses C(timedatectl) when available; otherwise it edits C(/etc/sysconfig/clock) or C(/etc/timezone) and calls C(hwclock).
+      On SmartOS it uses C(sm-set-timezone), on macOS C(systemsetup), and on BSD it modifies C(/etc/localtime).
+      On AIX, C(chtz) is used.
+ - As of Ansible 2.3 support was added for SmartOS and BSDs.
+ - As of Ansible 2.4 support was added for macOS.
+    - As of Ansible 2.9, support was added for AIX 6.1+.
+ - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails.
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ - Default is to keep current setting.
+ - B(At least one of name and hwclock are required.)
+ type: str
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ - Default is to keep current setting.
+      - Note that changing this option is not recommended, and it may fail
+        to apply, especially in virtualized environments such as AWS.
+ - B(At least one of name and hwclock are required.)
+ - I(Only used on Linux.)
+ type: str
+ aliases: [ rtc ]
+ choices: [ local, UTC ]
+notes:
+  - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone.
+  - On AIX only Olson/tz database timezones are usable (POSIX is not supported).
+ - An OS reboot is also required on AIX for the new timezone setting to take effect.
+author:
+ - Shinichi TAMURA (@tmshn)
+ - Jasper Lievisse Adriaanse (@jasperla)
+ - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+ description: The differences about the given arguments.
+ returned: success
+ type: complex
+ contains:
+ before:
+ description: The values before change
+ type: dict
+ after:
+ description: The values after change
+ type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+ timezone:
+ name: Asia/Tokyo
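+
+# A minimal additional sketch (hwclock is Linux-only):
+- name: Set timezone to Asia/Tokyo and keep the hardware clock in UTC
+  timezone:
+    name: Asia/Tokyo
+    hwclock: UTC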
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+ It does not use load_platform_subclass() because it needs to judge based
+ on whether the `timedatectl` command exists and is available.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if platform.system() == 'Linux':
+ timedatectl = module.get_bin_path('timedatectl')
+ if timedatectl is not None:
+ rc, stdout, stderr = module.run_command(timedatectl)
+ if rc == 0:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+                    module.warn('timedatectl command was found but not usable: %s. Falling back to another method.' % stderr)
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ elif re.match('^joyent_.*Z', platform.version()):
+ # platform.system() returns SunOS, which is too broad. So look at the
+ # platform version instead. However we have to ensure that we're not
+ # running in the global zone where changing the timezone has no effect.
+ zonename_cmd = module.get_bin_path('zonename')
+ if zonename_cmd is not None:
+ (rc, stdout, _) = module.run_command(zonename_cmd)
+ if rc == 0 and stdout.strip() == 'global':
+ module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+ return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+ elif re.match('^Darwin', platform.platform()):
+ return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+ elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+ return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+ elif platform.system() == 'AIX':
+ AIXoslevel = int(platform.version() + platform.release())
+ if AIXoslevel >= 61:
+ return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+ else:
+ module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+        # `self.value` holds the value of each param in each phase.
+        # Initially it only holds the "planned" phase; the
+        # `self.check()` function fills in the other phases.
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
+ self.module = module
+
+ def abort(self, msg):
+ """Abort the process with error message.
+
+ This is just the wrapper of module.fail_json().
+
+ Args:
+ msg: The error message.
+ """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+ def execute(self, *commands, **kwargs):
+ """Execute the shell command.
+
+ This is just the wrapper of module.run_command().
+
+ Args:
+ *commands: The command to execute.
+                They will be concatenated with a single space.
+ **kwargs: Only 'log' key is checked.
+ If kwargs['log'] is true, record the command to self.msg.
+
+ Returns:
+ stdout: Standard output of the command.
+ """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+ def diff(self, phase1='before', phase2='after'):
+ """Calculate the difference between given 2 phases.
+
+ Args:
+ phase1, phase2: The names of phase to compare.
+
+ Returns:
+ diff: The difference of value between phase1 and phase2.
+ This is in the format which can be used with the
+ `--diff` option of ansible-playbook.
+ """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+ return tzfile
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+    For timezone setting, it edits the following files and reflects the changes:
+ - /etc/sysconfig/clock ... RHEL/CentOS
+ - /etc/timezone ... Debian/Ubuntu
+ For hwclock setting, it executes `hwclock --systohc` command with the
+ '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name=None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+    # It's fine if all three config files don't exist
+ allow_no_file = dict(
+ name=True,
+ hwclock=True,
+ adjtime=True
+ )
+
+ regexps = dict(
+ name=None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ dist_regexps = dict(
+ SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
+ redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ )
+
+ dist_tzline_format = dict(
+ SuSE='TIMEZONE="%s"\n',
+ redhat='ZONE="%s"\n'
+ )
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ if 'name' in self.value:
+ tzfile = self._verify_timezone()
+ # `--remove-destination` is needed if /etc/localtime is a symlink so
+ # that it overwrites it instead of following it.
+ self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ if 'name' in self.value:
+ self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
+ '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
+ self.conf_files['name'] = '/etc/timezone'
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ else:
+ # RHEL/CentOS/SUSE
+ if self.module.get_bin_path('tzdata-update') is not None:
+ # tzdata-update cannot update the timezone if /etc/localtime is
+ # a symlink so we have to use cp to update the time zone which
+ # was set above.
+ if not os.path.islink('/etc/localtime'):
+ self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+ # else:
+ # self.update_timezone = 'cp --remove-destination ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ try:
+ f = open(self.conf_files['name'], 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, 'name'):
+ # If the config file doesn't exist detect the distribution and set regexps.
+ distribution = get_distribution()
+ if distribution == 'SuSE':
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+ else:
+ self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+ else:
+ # The key for timezone might be `ZONE` or `TIMEZONE`
+ # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
+ # So check the content of /etc/sysconfig/clock and decide which key to use.
+ sysconfig_clock = f.read()
+ f.close()
+ if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+
+ def _allow_ioerror(self, err, key):
+ # In some cases, even if the target file does not exist,
+ # simply creating it may solve the problem.
+ # In such cases, we should continue the configuration rather than aborting.
+ if err.errno != errno.ENOENT:
+ # If the error is not ENOENT ("No such file or directory"),
+ # (e.g., permission error, etc), we should abort.
+ return False
+ return self.allow_no_file.get(key, False)
+
+ def _edit_file(self, filename, regexp, value, key):
+ """Replace the first matched line with given `value`.
+
+        If `regexp` matches more than once, all matched lines other than the first will be deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+            key: For what key the file is being edited.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ lines = []
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ lines = file.readlines()
+ file.close()
+        # Find all the matched lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+ def _get_value_from_config(self, key, phase):
+ filename = self.conf_files[key]
+ try:
+ file = open(filename, mode='r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ if key == 'hwclock':
+ return 'n/a'
+ elif key == 'adjtime':
+ return 'UTC'
+ elif key == 'name':
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ if key == 'hwclock':
+ # If we cannot find UTC in the config that's fine.
+ return 'n/a'
+ elif key == 'adjtime':
+                # If we cannot find UTC/LOCAL in /etc/adjtime, UTC
+                # will be used by default.
+ return 'UTC'
+ elif key == 'name':
+ if phase == 'before':
+ # In 'before' phase UTC/LOCAL doesn't need to be set in
+ # the timezone config file, so we ignore this error.
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+ else:
+ if key == 'hwclock':
+ # convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def get(self, key, phase):
+ planned = self.value[key]['planned']
+ if key == 'hwclock':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the value in the config file is the same as the 'planned'
+ # value, we need to check /etc/adjtime.
+ value = self._get_value_from_config('adjtime', phase)
+ elif key == 'name':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+            # If the planned value is the same as the one in the config file,
+ # we need to check if /etc/localtime is also set to the 'planned' zone.
+ if os.path.islink('/etc/localtime'):
+ # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+ # to set, we need to return the TZ which the symlink points to.
+ if os.path.exists('/etc/localtime'):
+ # We use readlink() because on some distros zone files are symlinks
+ # to other zone files, so it's hard to get which TZ is actually set
+ # if we follow the symlink.
+ path = os.readlink('/etc/localtime')
+ linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE)
+ if linktz:
+ valuelink = linktz.group(1)
+ if valuelink != planned:
+ value = valuelink
+ else:
+ # Set current TZ to 'n/a' if the symlink points to a path
+ # which isn't a zone file.
+ value = 'n/a'
+ else:
+ # Set current TZ to 'n/a' if the symlink to the zone file is broken.
+ value = 'n/a'
+ else:
+ # If /etc/localtime is not a symlink best we can do is compare it with
+ # the 'planned' zone info file and return 'n/a' if they are different.
+ try:
+ if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
+ return 'n/a'
+ except Exception:
+ return 'n/a'
+ else:
+ self.abort('unknown parameter "%s"' % key)
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value,
+ key='name')
+ for cmd in self.update_timezone:
+ self.execute(cmd)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ utc = 'no'
+ else:
+ option = '--utc'
+ utc = 'yes'
+ if self.conf_files['hwclock'] is not None:
+ self._edit_file(filename=self.conf_files['hwclock'],
+ regexp=self.regexps['hwclock'],
+ value='UTC=%s\n' % utc,
+ key='hwclock')
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+class SmartOSTimezone(Timezone):
+ """This is a Timezone manipulation class for SmartOS instances.
+
+ It uses the C(sm-set-timezone) utility to set the timezone, and
+ inspects C(/etc/default/init) to determine the current timezone.
+
+ NB: A zone needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(SmartOSTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
+ if not self.settimezone:
+ module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/default/init`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ try:
+ f = open('/etc/default/init', 'r')
+ for line in f:
+ m = re.match('^TZ=(.*)$', line.strip())
+ if m:
+ return m.groups()[0]
+ except Exception:
+ self.module.fail_json(msg='Failed to read /etc/default/init')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through sm-set-timezone, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ cmd = 'sm-set-timezone %s' % value
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # sm-set-timezone knows no state and will always set the timezone.
+ # XXX: https://github.com/joyent/smtools/pull/2
+ m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+ if not (m and m.groups()[-1] == value):
+ self.module.fail_json(msg='Failed to set timezone')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class DarwinTimezone(Timezone):
+ """This is the timezone implementation for Darwin which, unlike other *BSD
+ implementations, uses the `systemsetup` command on Darwin to check/set
+ the timezone.
+ """
+
+ regexps = dict(
+ name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(DarwinTimezone, self).__init__(module)
+ self.systemsetup = module.get_bin_path('systemsetup', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_current_timezone(self, phase):
+ """Lookup the current timezone via `systemsetup -gettimezone`."""
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
+ return self.status[phase]
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ # Lookup the list of supported timezones via `systemsetup -listtimezones`.
+ # Note: Skip the first line that contains the label 'Time Zones:'
+ out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
+ tz_list = list(map(lambda x: x.strip(), out))
+ if tz not in tz_list:
+ self.abort('given timezone "%s" is not available' % tz)
+ return tz
+
+ def get(self, key, phase):
+ if key == 'name':
+ status = self._get_current_timezone(phase)
+ value = self.regexps[key].search(status).group(1)
+ return value
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.execute(self.systemsetup, '-settimezone', value, log=True)
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class BSDTimezone(Timezone):
+ """This is the timezone implementation for *BSD which works simply through
+ updating the `/etc/localtime` symlink to point to a valid timezone name under
+ `/usr/share/zoneinfo`.
+ """
+
+ def __init__(self, module):
+ super(BSDTimezone, self).__init__(module)
+
+ def __get_timezone(self):
+ zoneinfo_dir = '/usr/share/zoneinfo/'
+ localtime_file = '/etc/localtime'
+
+ # Strategy 1:
+ # If /etc/localtime does not exist, assume the timezone is UTC.
+ if not os.path.exists(localtime_file):
+ self.module.warn('Could not read /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ # Strategy 2:
+ # Follow symlink of /etc/localtime
+ zoneinfo_file = localtime_file
+ while not zoneinfo_file.startswith(zoneinfo_dir):
+ try:
+ zoneinfo_file = os.readlink(zoneinfo_file)
+ except OSError:
+ # OSError means "end of symlink chain" or broken link.
+ break
+ else:
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 3:
+ # (If /etc/localtime is not symlinked)
+ # Check all files in /usr/share/zoneinfo and return first non-link match.
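+ # Note: filecmp.cmp() defaults to shallow=True, so two files whose
+ # os.stat() signatures (type, size, mtime) match are treated as equal
+ # without comparing their contents.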
+ for dname, _, fnames in sorted(os.walk(zoneinfo_dir)):
+ for fname in sorted(fnames):
+ zoneinfo_file = os.path.join(dname, fname)
+ if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 4:
+ # As a fall-back, return 'UTC' as default assumption.
+ self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ def get(self, key, phase):
+ """Lookup the current timezone by resolving `/etc/localtime`."""
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ # First determine if the requested timezone is valid by looking in
+ # the zoneinfo directory.
+ zonefile = '/usr/share/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+ # Now (somewhat) atomically update the symlink by creating a new
+ # symlink and move it into place. Otherwise we have to remove the
+ # original symlink and create the new symlink, however that would
+ # create a race condition in case another process tries to read
+ # /etc/localtime between removal and creation.
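+ # For example (names are illustrative), setting 'Europe/Prague' does:
+ # os.symlink('/usr/share/zoneinfo/Europe/Prague', '/etc/localtime.AbC123xyz9')
+ # os.rename('/etc/localtime.AbC123xyz9', '/etc/localtime')
+ # rename(2) replaces the destination atomically, so readers never
+ # observe a missing /etc/localtime.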
+ suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+ new_localtime = '/etc/localtime.' + suffix
+
+ try:
+ os.symlink(zonefile, new_localtime)
+ os.rename(new_localtime, '/etc/localtime')
+ except Exception:
+ os.remove(new_localtime)
+ self.module.fail_json(msg='Could not update /etc/localtime')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+ """This is a Timezone manipulation class for AIX instances.
+
+ It uses the C(chtz) utility to set the timezone, and
+ inspects C(/etc/environment) to determine the current timezone.
+
+ While AIX time zones can be set using two formats (POSIX and
+ Olson), the preferred method is Olson.
+ See the following article for more information:
+ https://developer.ibm.com/articles/au-aix-posix/
+
+ NB: AIX needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(AIXTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+ def __get_timezone(self):
+ """ Return the current value of TZ= in /etc/environment """
+ try:
+ with open('/etc/environment', 'r') as f:
+ etcenvironment = f.read()
+ except Exception:
+ self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+ match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+ if match:
+ return match.group(1)
+ else:
+ return None
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/environment`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through chtz, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+ # It will only return non-zero if the chtz command itself fails, it does not check for
+ # valid timezones. We need to perform a basic check to confirm that the timezone
+ # definition exists in /usr/share/lib/zoneinfo
+ # This does mean that we can only support Olson for now. The below commented out regex
+ # detects Olson date formats, so in the future we could detect Posix or Olson and
+ # act accordingly.
+
+ # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+ # if not regex_olson.match(value):
+ # msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value
+ # self.module.fail_json(msg=msg)
+
+ # First determine if the requested timezone is valid by looking in the zoneinfo
+ # directory.
+ zonefile = '/usr/share/lib/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone.' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+ # Now set the TZ using chtz
+ cmd = 'chtz %s' % value
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # The best condition check we can do is to check the value of TZ after making the
+ # change.
+ TZ = self.__get_timezone()
+ if TZ != value:
+ msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+ self.module.fail_json(msg=msg)
+
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ module = AnsibleModule(
+ argument_spec=dict(
+ hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+ name=dict(type='str'),
+ ),
+ required_one_of=[
+ ['hwclock', 'name']
+ ],
+ supports_check_mode=True,
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ (after, planned) = tz.diff('after', 'planned').values()
+ if after != planned:
+ tz.abort('still not in the desired state, though changes have been made - '
+ 'planned: %s, after: %s' % (str(planned), str(after)))
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ufw.py b/test/support/integration/plugins/modules/ufw.py
new file mode 100644
index 0000000000..6452f7c910
--- /dev/null
+++ b/test/support/integration/plugins/modules/ufw.py
@@ -0,0 +1,598 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
+# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
+# Copyright: (c) 2013, James Martin <jmartin@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+ - Manage firewall with UFW.
+version_added: 1.6
+author:
+ - Aleksey Ovcharenko (@ovcharenko)
+ - Jarno Keskikangas (@pyykkis)
+ - Ahti Kitsik (@ahtik)
+notes:
+ - See C(man ufw) for more examples.
+requirements:
+ - C(ufw) package
+options:
+ state:
+ description:
+ - C(enabled) reloads firewall and enables firewall on boot.
+ - C(disabled) unloads firewall and disables firewall on boot.
+ - C(reloaded) reloads firewall.
+ - C(reset) disables and resets firewall to installation defaults.
+ type: str
+ choices: [ disabled, enabled, reloaded, reset ]
+ default:
+ description:
+ - Change the default policy for incoming or outgoing traffic.
+ type: str
+ choices: [ allow, deny, reject ]
+ aliases: [ policy ]
+ direction:
+ description:
+ - Select direction for a rule or default policy command. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ choices: [ in, incoming, out, outgoing, routed ]
+ logging:
+ description:
+ - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ type: str
+ choices: [ 'on', 'off', low, medium, high, full ]
+ insert:
+ description:
+ - Insert the corresponding rule as rule number NUM.
+ - Note that ufw numbers rules starting with 1.
+ type: int
+ insert_relative_to:
+ description:
+ - Allows the index in I(insert) to be interpreted relative to a position.
+ - C(zero) interprets the rule number as an absolute index (i.e. 1 is
+ the first rule).
+ - C(first-ipv4) interprets the rule number relative to the index of the
+ first IPv4 rule, or relative to the position where the first IPv4 rule
+ would be if there is currently none.
+ - C(last-ipv4) interprets the rule number relative to the index of the
+ last IPv4 rule, or relative to the position where the last IPv4 rule
+ would be if there is currently none.
+ - C(first-ipv6) interprets the rule number relative to the index of the
+ first IPv6 rule, or relative to the position where the first IPv6 rule
+ would be if there is currently none.
+ - C(last-ipv6) interprets the rule number relative to the index of the
+ last IPv6 rule, or relative to the position where the last IPv6 rule
+ would be if there is currently none.
+ type: str
+ choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
+ default: zero
+ version_added: "2.8"
+ rule:
+ description:
+ - Add firewall rule
+ type: str
+ choices: [ allow, deny, limit, reject ]
+ log:
+ description:
+ - Log new connections matched to this rule
+ type: bool
+ from_ip:
+ description:
+ - Source IP address.
+ type: str
+ default: any
+ aliases: [ from, src ]
+ from_port:
+ description:
+ - Source port.
+ type: str
+ to_ip:
+ description:
+ - Destination IP address.
+ type: str
+ default: any
+ aliases: [ dest, to ]
+ to_port:
+ description:
+ - Destination port.
+ type: str
+ aliases: [ port ]
+ proto:
+ description:
+ - TCP/IP protocol.
+ type: str
+ choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
+ aliases: [ protocol ]
+ name:
+ description:
+ - Use profile located in C(/etc/ufw/applications.d).
+ type: str
+ aliases: [ app ]
+ delete:
+ description:
+ - Delete rule.
+ type: bool
+ interface:
+ description:
+ - Specify interface for the rule. The direction (in or out) used
+ for the interface depends on the value of I(direction). See
+ I(interface_in) and I(interface_out) for routed rules that need
+ to supply both an input and an output interface. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ aliases: [ if ]
+ interface_in:
+ description:
+ - Specify input interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_out) for routed rules.
+ type: str
+ aliases: [ if_in ]
+ version_added: "2.10"
+ interface_out:
+ description:
+ - Specify output interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_in) for routed rules.
+ type: str
+ aliases: [ if_out ]
+ version_added: "2.10"
+ route:
+ description:
+ - Apply the rule to routed/forwarded packets.
+ type: bool
+ comment:
+ description:
+ - Add a comment to the rule. Requires UFW version >=0.35.
+ type: str
+ version_added: "2.4"
+'''
+
+EXAMPLES = r'''
+- name: Allow everything and enable UFW
+ ufw:
+ state: enabled
+ policy: allow
+
+- name: Set logging
+ ufw:
+ logging: 'on'
+
+# Sometimes it is desirable to let the sender know when traffic is
+# being denied, rather than simply ignoring it. In these cases, use
+# reject instead of deny. In addition, log rejected connections:
+- ufw:
+ rule: reject
+ port: auth
+ log: yes
+
+# ufw supports connection rate limiting, which is useful for protecting
+# against brute-force login attacks. ufw will deny connections if an IP
+# address has attempted to initiate 6 or more connections in the last
+# 30 seconds. See http://www.debian-administration.org/articles/187
+# for details. Typical usage is:
+- ufw:
+ rule: limit
+ port: ssh
+ proto: tcp
+
+# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
+# a rule=allow task can leave those ports exposed. Either use delete=yes
+# or a separate state=reset task)
+- ufw:
+ rule: allow
+ name: OpenSSH
+
+- name: Delete OpenSSH rule
+ ufw:
+ rule: allow
+ name: OpenSSH
+ delete: yes
+
+- name: Deny all access to port 53
+ ufw:
+ rule: deny
+ port: '53'
+
+- name: Allow port range 60000-61000
+ ufw:
+ rule: allow
+ port: 60000:61000
+ proto: tcp
+
+- name: Allow all access to tcp port 80
+ ufw:
+ rule: allow
+ port: '80'
+ proto: tcp
+
+- name: Allow all access from RFC1918 networks to this host
+ ufw:
+ rule: allow
+ src: '{{ item }}'
+ loop:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+
+- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
+ ufw:
+ rule: deny
+ proto: udp
+ src: 1.2.3.4
+ port: '514'
+ comment: Block syslog
+
+- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
+ ufw:
+ rule: allow
+ interface: eth0
+ direction: in
+ proto: udp
+ src: 1.2.3.5
+ from_port: '5469'
+ dest: 1.2.3.4
+ to_port: '5469'
+
+# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
+- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
+ ufw:
+ rule: deny
+ proto: tcp
+ src: 2001:db8::/32
+ port: '25'
+
+- name: Deny all IPv6 traffic to tcp port 20 on this host
+ # this should be the first IPv6 rule
+ ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+
+- name: Deny all IPv4 traffic to tcp port 20 on this host
+ # This should be the third to last IPv4 rule
+ # (insert: -1 addresses the second to last IPv4 rule;
+ # so the new rule will be inserted before the second
+ # to last IPv4 rule, and will become the third to last
+ # IPv4 rule.)
+ ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: -1
+ insert_relative_to: last-ipv4
+
+# Can be used to further restrict a global FORWARD policy set to allow
+- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
+ ufw:
+ rule: deny
+ route: yes
+ src: 1.2.3.0/24
+ dest: 4.5.6.0/24
+'''
+
+import re
+
+from operator import itemgetter
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def compile_ipv4_regexp():
+ r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
+ r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
+ return re.compile(r)
+
+
+def compile_ipv6_regexp():
+ """
+ validation pattern provided by :
+ https://stackoverflow.com/questions/53497/regular-expression-that-matches-
+ valid-ipv6-addresses#answer-17871737
+ """
+ r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
+ r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
+ r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
+ r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
+ r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
+ r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
+ r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
+ r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
+ return re.compile(r)
+
+
+def main():
+ command_keys = ['state', 'default', 'rule', 'logging']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
+ default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
+ logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
+ direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
+ delete=dict(type='bool', default=False),
+ route=dict(type='bool', default=False),
+ insert=dict(type='int'),
+ insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
+ rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
+ interface=dict(type='str', aliases=['if']),
+ interface_in=dict(type='str', aliases=['if_in']),
+ interface_out=dict(type='str', aliases=['if_out']),
+ log=dict(type='bool', default=False),
+ from_ip=dict(type='str', default='any', aliases=['from', 'src']),
+ from_port=dict(type='str'),
+ to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
+ to_port=dict(type='str', aliases=['port']),
+ proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
+ name=dict(type='str', aliases=['app']),
+ comment=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'proto', 'logging'],
+ # Mutual exclusivity with `interface` implied by `required_by`.
+ ['direction', 'interface_in'],
+ ['direction', 'interface_out'],
+ ],
+ required_one_of=[command_keys],
+ required_by=dict(
+ interface=('direction', ),
+ ),
+ )
+
+ cmds = []
+
+ ipv4_regexp = compile_ipv4_regexp()
+ ipv6_regexp = compile_ipv6_regexp()
+
+ def filter_line_that_not_start_with(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
+
+ def filter_line_that_contains(pattern, content):
+ return [line for line in content.splitlines(True) if pattern in line]
+
+ def filter_line_that_not_contains(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if pattern not in line])
+
+ def filter_line_that_match_func(match_func, content):
+ return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
+
+ def filter_line_that_contains_ipv4(content):
+ return filter_line_that_match_func(ipv4_regexp.search, content)
+
+ def filter_line_that_contains_ipv6(content):
+ return filter_line_that_match_func(ipv6_regexp.search, content)
+
+ def is_starting_by_ipv4(ip):
+ return ipv4_regexp.match(ip) is not None
+
+ def is_starting_by_ipv6(ip):
+ return ipv6_regexp.match(ip) is not None
+
+ def execute(cmd, ignore_error=False):
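+ # cmd is a list of [condition, text] (or single-element [text]) entries;
+ # keep the last element of every entry whose first element is truthy and
+ # join them into a single command string.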
+ cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+ cmds.append(cmd)
+ (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
+
+ if rc != 0 and not ignore_error:
+ module.fail_json(msg=err or out, commands=cmds)
+
+ return out
+
+ def get_current_rules():
+ user_rules_files = ["/lib/ufw/user.rules",
+ "/lib/ufw/user6.rules",
+ "/etc/ufw/user.rules",
+ "/etc/ufw/user6.rules",
+ "/var/lib/ufw/user.rules",
+ "/var/lib/ufw/user6.rules"]
+
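+ # ufw records each rule as a '### tuple ...' comment line in these
+ # files; grep them out to get a comparable snapshot of the rule set.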
+ cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
+
+ cmd.extend([[f] for f in user_rules_files])
+ return execute(cmd, ignore_error=True)
+
+ def ufw_version():
+ """
+ Returns the major and minor version of ufw installed on the system.
+ """
+ out = execute([[ufw_bin], ["--version"]])
+
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
+ if matches is None:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+ params = module.params
+
+ commands = dict((key, params[key]) for key in command_keys if params[key])
+
+ # Ensure ufw is available
+ ufw_bin = module.get_bin_path('ufw', True)
+ grep_bin = module.get_bin_path('grep', True)
+
+ # Save the pre state and rules in order to recognize changes
+ pre_state = execute([[ufw_bin], ['status verbose']])
+ pre_rules = get_current_rules()
+
+ changed = False
+
+ # Execute filter
+ for (command, value) in commands.items():
+
+ cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
+
+ if command == 'state':
+ states = {'enabled': 'enable', 'disabled': 'disable',
+ 'reloaded': 'reload', 'reset': 'reset'}
+
+ if value in ['reloaded', 'reset']:
+ changed = True
+
+ if module.check_mode:
+ # "active" would also match "inactive", hence the space
+ ufw_enabled = pre_state.find(" active") != -1
+ if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
+ changed = True
+ else:
+ execute(cmd + [['-f'], [states[value]]])
+
+ elif command == 'logging':
+ extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
+ if extract:
+ current_level = extract.group(2)
+ current_on_off_value = extract.group(1)
+ if value != "off":
+ if current_on_off_value == "off":
+ changed = True
+ elif value != "on" and value != current_level:
+ changed = True
+ elif current_on_off_value != "off":
+ changed = True
+ else:
+ changed = True
+
+ if not module.check_mode:
+ execute(cmd + [[command], [value]])
+
+ elif command == 'default':
+ if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
+ module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
+ if module.check_mode:
+ regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
+ extract = re.search(regexp, pre_state)
+ if extract is not None:
+ current_default_values = {}
+ current_default_values["incoming"] = extract.group(1)
+ current_default_values["outgoing"] = extract.group(2)
+ current_default_values["routed"] = extract.group(3)
+ v = current_default_values[params['direction'] or 'incoming']
+ if v not in (value, 'disabled'):
+ changed = True
+ else:
+ changed = True
+ else:
+ execute(cmd + [[command], [value], [params['direction']]])
+
+ elif command == 'rule':
+ if params['direction'] not in ['in', 'out', None]:
+ module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
+ if not params['route'] and params['interface_in'] and params['interface_out']:
+ module.fail_json(msg='Only route rules can combine '
+ 'interface_in and interface_out')
+ # Rules are constructed according to the long format
+ #
+ # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
+ # [proto protocol] [app application] [comment COMMENT]
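+ # e.g. a route rule built from this template might look like
+ # (hypothetical values):
+ # ufw route insert 3 deny in on eth0 log from 1.2.3.0/24 to any \
+ # port 25 proto tcp comment 'no smtp'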
+ cmd.append([module.boolean(params['route']), 'route'])
+ cmd.append([module.boolean(params['delete']), 'delete'])
+ if params['insert'] is not None:
+ relative_to_cmd = params['insert_relative_to']
+ if relative_to_cmd == 'zero':
+ insert_to = params['insert']
+ else:
+ (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
+ numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
+ lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
+ lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
+ last_number = max([no for (no, ipv6) in lines]) if lines else 0
+ has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
+ has_ipv6 = any([ipv6 for (no, ipv6) in lines])
+ if relative_to_cmd == 'first-ipv4':
+ relative_to = 1
+ elif relative_to_cmd == 'last-ipv4':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
+ elif relative_to_cmd == 'first-ipv6':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
+ elif relative_to_cmd == 'last-ipv6':
+ relative_to = last_number if has_ipv6 else last_number + 1
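+ # Worked example (assumed state): rules 1-2 are IPv4 and rules 3-4
+ # are IPv6, so last_number=4 and both has_ipv4 and has_ipv6 are True.
+ # Then first-ipv4 -> 1, last-ipv4 -> 2, first-ipv6 -> 3, last-ipv6 -> 4,
+ # and insert=-1 with last-ipv4 yields insert_to = -1 + 2 = 1.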
+ insert_to = params['insert'] + relative_to
+ if insert_to > last_number:
+ # ufw does not like it when the insert number is larger than the
+ # maximal rule number for IPv4/IPv6.
+ insert_to = None
+ cmd.append([insert_to is not None, "insert %s" % insert_to])
+ cmd.append([value])
+ cmd.append([params['direction'], "%s" % params['direction']])
+ cmd.append([params['interface'], "on %s" % params['interface']])
+ cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
+ cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
+ cmd.append([module.boolean(params['log']), 'log'])
+
+ for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
+ ('to_ip', "to %s"), ('to_port', "port %s"),
+ ('proto', "proto %s"), ('name', "app '%s'")]:
+ value = params[key]
+ cmd.append([value, template % (value)])
+
+ ufw_major, ufw_minor, dummy = ufw_version()
+ # comment is supported only in ufw version 0.35 and later
+ if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
+ cmd.append([params['comment'], "comment '%s'" % params['comment']])
+
+ rules_dry = execute(cmd)
+
+ if module.check_mode:
+
+ nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
+
+ if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
+
+ rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
+ # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
+ if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
+ if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
+ changed = True
+ elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
+ if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
+ changed = True
+ elif pre_rules != rules_dry:
+ changed = True
+
+ # Get the new state
+ if module.check_mode:
+ return module.exit_json(changed=changed, commands=cmds)
+ else:
+ post_state = execute([[ufw_bin], ['status'], ['verbose']])
+ if not changed:
+ post_rules = get_current_rules()
+ changed = (pre_state != post_state) or (pre_rules != post_rules)
+ return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/x509_crl.py b/test/support/integration/plugins/modules/x509_crl.py
new file mode 100644
index 0000000000..ef601edadc
--- /dev/null
+++ b/test/support/integration/plugins/modules/x509_crl.py
@@ -0,0 +1,783 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: x509_crl
+version_added: "2.10"
+short_description: Generate Certificate Revocation Lists (CRLs)
+description:
+ - This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
+ - Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
+ or as a path to a certificate file in PEM format.
+requirements:
+ - cryptography >= 1.2
+author:
+ - Felix Fontein (@felixfontein)
+options:
+ state:
+ description:
+ - Whether the CRL file should exist or not, taking action if the state is different from what is stated.
+ type: str
+ default: present
+ choices: [ absent, present ]
+
+ mode:
+ description:
+ - Defines how to process entries of existing CRLs.
+ - If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
+ as specified in I(revoked_certificates).
+ - If set to C(update), makes sure that the CRL contains the revoked certificates from
+ I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
+ already exists, all entries from the existing CRL will also be included in the new CRL.
+ When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
+ type: str
+ default: generate
+ choices: [ generate, update ]
+
+ force:
+ description:
+ - Should the CRL be forced to be regenerated.
+ type: bool
+ default: no
+
+ backup:
+ description:
+ - Create a backup file including a timestamp so you can get the original
+ CRL back if you overwrote it with a new one by accident.
+ type: bool
+ default: no
+
+ path:
+ description:
+ - Remote absolute path where the generated CRL file should be created or is already located.
+ type: path
+ required: yes
+
+ privatekey_path:
+ description:
+ - Path to the CA's private key to use when signing the CRL.
+ - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+ type: path
+
+ privatekey_content:
+ description:
+ - The content of the CA's private key to use when signing the CRL.
+ - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+ type: str
+
+ privatekey_passphrase:
+ description:
+ - The passphrase for the I(privatekey_path).
+ - This is required if the private key is password protected.
+ type: str
+
+ issuer:
+ description:
+ - Key/value pairs that will be present in the issuer name field of the CRL.
+ - If you need to specify more than one value with the same key, use a list as value.
+ - Required if I(state) is C(present).
+ type: dict
+
+ last_update:
+ description:
+ - The point in time from which this CRL can be trusted.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent, except when
+ I(ignore_timestamps) is set to C(yes).
+ type: str
+ default: "+0s"
+
+ next_update:
+ description:
+ - "The absolute latest point in time by which this I(issuer) is expected to have issued
+ another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent, except when
+ I(ignore_timestamps) is set to C(yes).
+ - Required if I(state) is C(present).
+ type: str
+
+ digest:
+ description:
+ - Digest algorithm to be used when signing the CRL.
+ type: str
+ default: sha256
+
+ revoked_certificates:
+ description:
+ - List of certificates to be revoked.
+ - Required if I(state) is C(present).
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Path to a certificate in PEM format.
+ - The serial number and issuer will be extracted from the certificate.
+ - Mutually exclusive with I(content) and I(serial_number). One of these three options
+ must be specified.
+ type: path
+ content:
+ description:
+ - Content of a certificate in PEM format.
+ - The serial number and issuer will be extracted from the certificate.
+ - Mutually exclusive with I(path) and I(serial_number). One of these three options
+ must be specified.
+ type: str
+ serial_number:
+ description:
+ - Serial number of the certificate.
+ - Mutually exclusive with I(path) and I(content). One of these three options must
+ be specified.
+ type: int
+ revocation_date:
+ description:
+ - The point in time the certificate was revoked.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent, except when
+ I(ignore_timestamps) is set to C(yes).
+ type: str
+ default: "+0s"
+ issuer:
+ description:
+ - The certificate's issuer.
+ - "Example: C(DNS:ca.example.org)"
+ type: list
+ elements: str
+ issuer_critical:
+ description:
+ - Whether the certificate issuer extension should be critical.
+ type: bool
+ default: no
+ reason:
+ description:
+ - The value for the revocation reason extension.
+ type: str
+ choices:
+ - unspecified
+ - key_compromise
+ - ca_compromise
+ - affiliation_changed
+ - superseded
+ - cessation_of_operation
+ - certificate_hold
+ - privilege_withdrawn
+ - aa_compromise
+ - remove_from_crl
+ reason_critical:
+ description:
+ - Whether the revocation reason extension should be critical.
+ type: bool
+ default: no
+ invalidity_date:
+ description:
+ - The point in time it was known/suspected that the private key was compromised
+ or that the certificate otherwise became invalid.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent. This will NOT
+ change when I(ignore_timestamps) is set to C(yes).
+ type: str
+ invalidity_date_critical:
+ description:
+ - Whether the invalidity date extension should be critical.
+ type: bool
+ default: no
+
+ ignore_timestamps:
+ description:
+ - Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
+ I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
+ I(invalidity_date) in I(revoked_certificates) will never be ignored.
+ - Use this in combination with relative timestamps for these values to get idempotency.
+ type: bool
+ default: no
+
+ return_content:
+ description:
+ - If set to C(yes), will return the (current or generated) CRL's content as I(crl).
+ type: bool
+ default: no
+
+extends_documentation_fragment:
+ - files
+
+notes:
+ - All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
+ - Date specified should be UTC. Minutes and seconds are mandatory.
+'''
+
+EXAMPLES = r'''
+- name: Generate a CRL
+ x509_crl:
+ path: /etc/ssl/my-ca.crl
+ privatekey_path: /etc/ssl/private/my-ca.pem
+ issuer:
+ CN: My CA
+ last_update: "+0s"
+ next_update: "+7d"
+ revoked_certificates:
+ - serial_number: 1234
+ revocation_date: 20190331202428Z
+ issuer:
+ CN: My CA
+ - serial_number: 2345
+ revocation_date: 20191013152910Z
+ reason: affiliation_changed
+ invalidity_date: 20191001000000Z
+ - path: /etc/ssl/crt/revoked-cert.pem
+ revocation_date: 20191010010203Z
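+
+# A sketch of C(mode=update) (values are assumptions): entries already in the
+# CRL are kept and the new entry is merged in; combined with relative
+# timestamps, ignore_timestamps keeps the task idempotent.
+- name: Add a revoked certificate to an existing CRL
+ x509_crl:
+ path: /etc/ssl/my-ca.crl
+ privatekey_path: /etc/ssl/private/my-ca.pem
+ issuer:
+ CN: My CA
+ next_update: "+7d"
+ mode: update
+ ignore_timestamps: yes
+ revoked_certificates:
+ - serial_number: 3456
+ reason: key_compromise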
+'''
+
+RETURN = r'''
+filename:
+ description: Path to the generated CRL
+ returned: changed or success
+ type: str
+ sample: /path/to/my-ca.crl
+backup_file:
+ description: Name of backup file created.
+ returned: changed and if I(backup) is C(yes)
+ type: str
+ sample: /path/to/my-ca.crl.2019-03-09@11:22~
+privatekey:
+ description: Path to the private CA key
+ returned: changed or success
+ type: str
+ sample: /path/to/my-ca.pem
+issuer:
+ description:
+ - The CRL's issuer.
+ - Note that for repeated values, only the last one will be returned.
+ returned: success
+ type: dict
+ sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+ description: The CRL's issuer as an ordered list of tuples.
+ returned: success
+ type: list
+ elements: list
+ sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
+last_update:
+ description: The point in time from which this CRL can be trusted as ASN.1 TIME.
+ returned: success
+ type: str
+ sample: 20190413202428Z
+next_update:
+ description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
+ returned: success
+ type: str
+ sample: 20190413202428Z
+digest:
+ description: The signature algorithm used to sign the CRL.
+ returned: success
+ type: str
+ sample: sha256WithRSAEncryption
+revoked_certificates:
+ description: List of certificates to be revoked.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ serial_number:
+ description: Serial number of the certificate.
+ type: int
+ sample: 1234
+ revocation_date:
+ description: The point in time the certificate was revoked as ASN.1 TIME.
+ type: str
+ sample: 20190413202428Z
+ issuer:
+ description: The certificate's issuer.
+ type: list
+ elements: str
+ sample: '["DNS:ca.example.org"]'
+ issuer_critical:
+ description: Whether the certificate issuer extension is critical.
+ type: bool
+ sample: no
+ reason:
+ description:
+ - The value for the revocation reason extension.
+ - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
+ C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
+ C(remove_from_crl).
+ type: str
+ sample: key_compromise
+ reason_critical:
+ description: Whether the revocation reason extension is critical.
+ type: bool
+ sample: no
+ invalidity_date:
+ description: |
+ The point in time it was known/suspected that the private key was compromised
+ or that the certificate otherwise became invalid as ASN.1 TIME.
+ type: str
+ sample: 20190413202428Z
+ invalidity_date_critical:
+ description: Whether the invalidity date extension is critical.
+ type: bool
+ sample: no
+crl:
+ description: The (current or generated) CRL's content.
+ returned: if I(state) is C(present) and I(return_content) is C(yes)
+ type: str
+'''
+
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+from ansible.module_utils import crypto as crypto_utils
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
+
+CRYPTOGRAPHY_IMP_ERR = None
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives.serialization import Encoding
+ from cryptography.x509 import (
+ CertificateRevocationListBuilder,
+ RevokedCertificateBuilder,
+ NameAttribute,
+ Name,
+ )
+ CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
+except ImportError:
+ CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
+ CRYPTOGRAPHY_FOUND = False
+else:
+ CRYPTOGRAPHY_FOUND = True
+
+
+TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
+
+
+class CRLError(crypto_utils.OpenSSLObjectError):
+ pass
+
+
+class CRL(crypto_utils.OpenSSLObject):
+
+ def __init__(self, module):
+ super(CRL, self).__init__(
+ module.params['path'],
+ module.params['state'],
+ module.params['force'],
+ module.check_mode
+ )
+
+ self.update = module.params['mode'] == 'update'
+ self.ignore_timestamps = module.params['ignore_timestamps']
+ self.return_content = module.params['return_content']
+ self.crl_content = None
+
+ self.privatekey_path = module.params['privatekey_path']
+ self.privatekey_content = module.params['privatekey_content']
+ if self.privatekey_content is not None:
+ self.privatekey_content = self.privatekey_content.encode('utf-8')
+ self.privatekey_passphrase = module.params['privatekey_passphrase']
+
+ self.issuer = crypto_utils.parse_name_field(module.params['issuer'])
+ self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]
+
+ self.last_update = crypto_utils.get_relative_time_option(module.params['last_update'], 'last_update')
+ self.next_update = crypto_utils.get_relative_time_option(module.params['next_update'], 'next_update')
+
+ self.digest = crypto_utils.select_message_digest(module.params['digest'])
+ if self.digest is None:
+ raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))
+
+ self.revoked_certificates = []
+ for i, rc in enumerate(module.params['revoked_certificates']):
+ result = {
+ 'serial_number': None,
+ 'revocation_date': None,
+ 'issuer': None,
+ 'issuer_critical': False,
+ 'reason': None,
+ 'reason_critical': False,
+ 'invalidity_date': None,
+ 'invalidity_date_critical': False,
+ }
+ path_prefix = 'revoked_certificates[{0}].'.format(i)
+ if rc['path'] is not None or rc['content'] is not None:
+ # Load certificate from file or content
+ try:
+ if rc['content'] is not None:
+ rc['content'] = rc['content'].encode('utf-8')
+ cert = crypto_utils.load_certificate(rc['path'], content=rc['content'], backend='cryptography')
+ try:
+ result['serial_number'] = cert.serial_number
+ except AttributeError:
+ # The property was called "serial" before cryptography 1.4
+ result['serial_number'] = cert.serial
+ except crypto_utils.OpenSSLObjectError as e:
+ if rc['content'] is not None:
+ module.fail_json(
+ msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
+ )
+ else:
+ module.fail_json(
+ msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
+ )
+ else:
+ # Specify serial_number (and potentially issuer) directly
+ result['serial_number'] = rc['serial_number']
+ # All other options
+ if rc['issuer']:
+ result['issuer'] = [crypto_utils.cryptography_get_name(issuer) for issuer in rc['issuer']]
+ result['issuer_critical'] = rc['issuer_critical']
+ result['revocation_date'] = crypto_utils.get_relative_time_option(
+ rc['revocation_date'],
+ path_prefix + 'revocation_date'
+ )
+ if rc['reason']:
+ result['reason'] = crypto_utils.REVOCATION_REASON_MAP[rc['reason']]
+ result['reason_critical'] = rc['reason_critical']
+ if rc['invalidity_date']:
+ result['invalidity_date'] = crypto_utils.get_relative_time_option(
+ rc['invalidity_date'],
+ path_prefix + 'invalidity_date'
+ )
+ result['invalidity_date_critical'] = rc['invalidity_date_critical']
+ self.revoked_certificates.append(result)
+
+ self.module = module
+
+ self.backup = module.params['backup']
+ self.backup_file = None
+
+ try:
+ self.privatekey = crypto_utils.load_privatekey(
+ path=self.privatekey_path,
+ content=self.privatekey_content,
+ passphrase=self.privatekey_passphrase,
+ backend='cryptography'
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ raise CRLError(exc)
+
+ self.crl = None
+ try:
+ with open(self.path, 'rb') as f:
+ data = f.read()
+ self.crl = x509.load_pem_x509_crl(data, default_backend())
+ if self.return_content:
+ self.crl_content = data
+ except Exception as dummy:
+ self.crl_content = None
+
+ def remove(self):
+ if self.backup:
+ self.backup_file = self.module.backup_local(self.path)
+ super(CRL, self).remove(self.module)
+
+ def _compress_entry(self, entry):
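+ # Reduce an entry to a plain tuple so entries can be compared (and, in
+ # update mode, matched and removed from a list) reliably.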
+ if self.ignore_timestamps:
+ # Throw out revocation_date
+ return (
+ entry['serial_number'],
+ tuple(entry['issuer']) if entry['issuer'] is not None else None,
+ entry['issuer_critical'],
+ entry['reason'],
+ entry['reason_critical'],
+ entry['invalidity_date'],
+ entry['invalidity_date_critical'],
+ )
+ else:
+ return (
+ entry['serial_number'],
+ entry['revocation_date'],
+ tuple(entry['issuer']) if entry['issuer'] is not None else None,
+ entry['issuer_critical'],
+ entry['reason'],
+ entry['reason_critical'],
+ entry['invalidity_date'],
+ entry['invalidity_date_critical'],
+ )
+
+ def check(self, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ state_and_perms = super(CRL, self).check(self.module, perms_required)
+
+ if not state_and_perms:
+ return False
+
+ if self.crl is None:
+ return False
+
+ if self.last_update != self.crl.last_update and not self.ignore_timestamps:
+ return False
+ if self.next_update != self.crl.next_update and not self.ignore_timestamps:
+ return False
+ if self.digest.name != self.crl.signature_hash_algorithm.name:
+ return False
+
+ want_issuer = [(crypto_utils.cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
+ if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
+ return False
+
+ old_entries = [self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
+ new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
+ if self.update:
+ # We don't simply use a set so that duplicate entries are treated correctly
+ for entry in new_entries:
+ try:
+ old_entries.remove(entry)
+ except ValueError:
+ return False
+ else:
+ if old_entries != new_entries:
+ return False
+
+ return True
+
+ def _generate_crl(self):
+ backend = default_backend()
+ crl = CertificateRevocationListBuilder()
+
+ try:
+ crl = crl.issuer_name(Name([
+ NameAttribute(crypto_utils.cryptography_name_to_oid(entry[0]), to_text(entry[1]))
+ for entry in self.issuer
+ ]))
+ except ValueError as e:
+ raise CRLError(e)
+
+ crl = crl.last_update(self.last_update)
+ crl = crl.next_update(self.next_update)
+
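+ # In update mode, carry over entries from the existing CRL that are not
+ # superseded by one of the newly specified revoked certificates.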
+ if self.update and self.crl:
+ new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
+ for entry in self.crl:
+ decoded_entry = self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(entry))
+ if decoded_entry not in new_entries:
+ crl = crl.add_revoked_certificate(entry)
+ for entry in self.revoked_certificates:
+ revoked_cert = RevokedCertificateBuilder()
+ revoked_cert = revoked_cert.serial_number(entry['serial_number'])
+ revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
+ if entry['issuer'] is not None:
+ revoked_cert = revoked_cert.add_extension(
+ x509.CertificateIssuer([
+ crypto_utils.cryptography_get_name(name) for name in entry['issuer']
+ ]),
+ entry['issuer_critical']
+ )
+ if entry['reason'] is not None:
+ revoked_cert = revoked_cert.add_extension(
+ x509.CRLReason(entry['reason']),
+ entry['reason_critical']
+ )
+ if entry['invalidity_date'] is not None:
+ revoked_cert = revoked_cert.add_extension(
+ x509.InvalidityDate(entry['invalidity_date']),
+ entry['invalidity_date_critical']
+ )
+ crl = crl.add_revoked_certificate(revoked_cert.build(backend))
+
+ self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
+ return self.crl.public_bytes(Encoding.PEM)
+
+ def generate(self):
+ if not self.check(perms_required=False) or self.force:
+ result = self._generate_crl()
+ if self.return_content:
+ self.crl_content = result
+ if self.backup:
+ self.backup_file = self.module.backup_local(self.path)
+ crypto_utils.write_file(self.module, result)
+ self.changed = True
+
+ file_args = self.module.load_file_common_arguments(self.module.params)
+ if self.module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ def _dump_revoked(self, entry):
+ return {
+ 'serial_number': entry['serial_number'],
+ 'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
+ 'issuer':
+ [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
+ if entry['issuer'] is not None else None,
+ 'issuer_critical': entry['issuer_critical'],
+ 'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
+ 'reason_critical': entry['reason_critical'],
+ 'invalidity_date':
+ entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
+ if entry['invalidity_date'] is not None else None,
+ 'invalidity_date_critical': entry['invalidity_date_critical'],
+ }
+
+ def dump(self, check_mode=False):
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'last_update': None,
+ 'next_update': None,
+ 'digest': None,
+ 'issuer_ordered': None,
+ 'issuer': None,
+ 'revoked_certificates': [],
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+
+ if check_mode:
+ result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
+ result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
+ # result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
+ result['digest'] = self.module.params['digest']
+ result['issuer_ordered'] = self.issuer
+ result['issuer'] = {}
+ for k, v in self.issuer:
+ result['issuer'][k] = v
+ result['revoked_certificates'] = []
+ for entry in self.revoked_certificates:
+ result['revoked_certificates'].append(self._dump_revoked(entry))
+ elif self.crl:
+ result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
+ result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
+ try:
+ result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
+ except AttributeError:
+ # Older cryptography versions don't have signature_algorithm_oid yet
+ dotted = crypto_utils._obj2txt(
+ self.crl._backend._lib,
+ self.crl._backend._ffi,
+ self.crl._x509_crl.sig_alg.algorithm
+ )
+ oid = x509.oid.ObjectIdentifier(dotted)
+ result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
+ issuer = []
+ for attribute in self.crl.issuer:
+ issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
+ result['issuer_ordered'] = issuer
+ result['issuer'] = {}
+ for k, v in issuer:
+ result['issuer'][k] = v
+ result['revoked_certificates'] = []
+ for cert in self.crl:
+ entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
+ result['revoked_certificates'].append(self._dump_revoked(entry))
+
+ if self.return_content:
+ result['crl'] = self.crl_content
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ mode=dict(type='str', default='generate', choices=['generate', 'update']),
+ force=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ privatekey_path=dict(type='path'),
+ privatekey_content=dict(type='str'),
+ privatekey_passphrase=dict(type='str', no_log=True),
+ issuer=dict(type='dict'),
+ last_update=dict(type='str', default='+0s'),
+ next_update=dict(type='str'),
+ digest=dict(type='str', default='sha256'),
+ ignore_timestamps=dict(type='bool', default=False),
+ return_content=dict(type='bool', default=False),
+ revoked_certificates=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ path=dict(type='path'),
+ content=dict(type='str'),
+ serial_number=dict(type='int'),
+ revocation_date=dict(type='str', default='+0s'),
+ issuer=dict(type='list', elements='str'),
+ issuer_critical=dict(type='bool', default=False),
+ reason=dict(
+ type='str',
+ choices=[
+ 'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
+ 'superseded', 'cessation_of_operation', 'certificate_hold',
+ 'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
+ ]
+ ),
+ reason_critical=dict(type='bool', default=False),
+ invalidity_date=dict(type='str'),
+ invalidity_date_critical=dict(type='bool', default=False),
+ ),
+ required_one_of=[['path', 'content', 'serial_number']],
+ mutually_exclusive=[['path', 'content', 'serial_number']],
+ ),
+ ),
+ required_if=[
+ ('state', 'present', ['privatekey_path', 'privatekey_content'], True),
+ ('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
+ ],
+ mutually_exclusive=(
+ ['privatekey_path', 'privatekey_content'],
+ ),
+ supports_check_mode=True,
+ add_file_common_args=True,
+ )
+
+ if not CRYPTOGRAPHY_FOUND:
+ module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
+ exception=CRYPTOGRAPHY_IMP_ERR)
+
+ try:
+ crl = CRL(module)
+
+ if module.params['state'] == 'present':
+ if module.check_mode:
+ result = crl.dump(check_mode=True)
+ result['changed'] = module.params['force'] or not crl.check()
+ module.exit_json(**result)
+
+ crl.generate()
+ else:
+ if module.check_mode:
+ result = crl.dump(check_mode=True)
+ result['changed'] = os.path.exists(module.params['path'])
+ module.exit_json(**result)
+
+ crl.remove()
+
+ result = crl.dump()
+ module.exit_json(**result)
+ except crypto_utils.OpenSSLObjectError as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/x509_crl_info.py b/test/support/integration/plugins/modules/x509_crl_info.py
new file mode 100644
index 0000000000..b61db26ff1
--- /dev/null
+++ b/test/support/integration/plugins/modules/x509_crl_info.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: x509_crl_info
+version_added: "2.10"
+short_description: Retrieve information on Certificate Revocation Lists (CRLs)
+description:
+ - This module allows one to retrieve information on Certificate Revocation Lists (CRLs).
+requirements:
+ - cryptography >= 1.2
+author:
+ - Felix Fontein (@felixfontein)
+options:
+ path:
+ description:
+ - Remote absolute path where the CRL file is located.
+ - Either I(path) or I(content) must be specified, but not both.
+ type: path
+ content:
+ description:
+ - Content of the X.509 CRL in PEM format.
+ - Either I(path) or I(content) must be specified, but not both.
+ type: str
+
+notes:
+ - All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
+ They are all in UTC.
+seealso:
+ - module: x509_crl
+'''
+
+EXAMPLES = r'''
+- name: Get information on CRL
+ x509_crl_info:
+ path: /etc/ssl/my-ca.crl
+ register: result
+
+- debug:
+ msg: "{{ result }}"
+'''
+
+RETURN = r'''
+issuer:
+ description:
+ - The CRL's issuer.
+ - Note that for repeated values, only the last one will be returned.
+ returned: success
+ type: dict
+ sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+ description: The CRL's issuer as an ordered list of tuples.
+ returned: success
+ type: list
+ elements: list
+ sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
+last_update:
+ description: The point in time from which this CRL can be trusted as ASN.1 TIME.
+ returned: success
+ type: str
+ sample: 20190413202428Z
+next_update:
+ description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
+ returned: success
+ type: str
+ sample: 20190413202428Z
+digest:
+ description: The signature algorithm used to sign the CRL.
+ returned: success
+ type: str
+ sample: sha256WithRSAEncryption
+revoked_certificates:
+ description: List of certificates to be revoked.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ serial_number:
+ description: Serial number of the certificate.
+ type: int
+ sample: 1234
+ revocation_date:
+ description: The point in time the certificate was revoked as ASN.1 TIME.
+ type: str
+ sample: 20190413202428Z
+ issuer:
+ description: The certificate's issuer.
+ type: list
+ elements: str
+ sample: '["DNS:ca.example.org"]'
+ issuer_critical:
+ description: Whether the certificate issuer extension is critical.
+ type: bool
+ sample: no
+ reason:
+ description:
+ - The value for the revocation reason extension.
+ - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
+ C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
+ C(remove_from_crl).
+ type: str
+ sample: key_compromise
+ reason_critical:
+ description: Whether the revocation reason extension is critical.
+ type: bool
+ sample: no
+ invalidity_date:
+ description: |
+ The point in time it was known or suspected that the private key was compromised
+ or that the certificate otherwise became invalid, as ASN.1 TIME.
+ type: str
+ sample: 20190413202428Z
+ invalidity_date_critical:
+ description: Whether the invalidity date extension is critical.
+ type: bool
+ sample: no
+'''
+
+
+import traceback
+from distutils.version import LooseVersion
+
+from ansible.module_utils import crypto as crypto_utils
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
+
+CRYPTOGRAPHY_IMP_ERR = None
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+ CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
+except ImportError:
+ CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
+ CRYPTOGRAPHY_FOUND = False
+else:
+ CRYPTOGRAPHY_FOUND = True
+
+
+TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
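+# Illustration (assumed example, not part of the module): formatting a datetime
+# with this pattern yields the ASN.1 TIME strings documented in RETURN, e.g.
+#   datetime.datetime(2019, 4, 13, 20, 24, 28).strftime(TIMESTAMP_FORMAT) == '20190413202428Z'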
+
+
+class CRLError(crypto_utils.OpenSSLObjectError):
+ pass
+
+
+class CRLInfo(crypto_utils.OpenSSLObject):
+ """The main module implementation."""
+
+ def __init__(self, module):
+ super(CRLInfo, self).__init__(
+ module.params['path'] or '',
+ 'present',
+ False,
+ module.check_mode
+ )
+
+ self.content = module.params['content']
+
+ self.module = module
+
+ self.crl = None
+ if self.content is None:
+ try:
+ with open(self.path, 'rb') as f:
+ data = f.read()
+ except Exception as e:
+ self.module.fail_json(msg='Error while reading CRL file from disk: {0}'.format(e))
+ else:
+ data = self.content.encode('utf-8')
+
+ try:
+ self.crl = x509.load_pem_x509_crl(data, default_backend())
+ except Exception as e:
+ self.module.fail_json(msg='Error while decoding CRL: {0}'.format(e))
+
+ def _dump_revoked(self, entry):
+ return {
+ 'serial_number': entry['serial_number'],
+ 'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
+ 'issuer':
+ [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
+ if entry['issuer'] is not None else None,
+ 'issuer_critical': entry['issuer_critical'],
+ 'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
+ 'reason_critical': entry['reason_critical'],
+ 'invalidity_date':
+ entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
+ if entry['invalidity_date'] is not None else None,
+ 'invalidity_date_critical': entry['invalidity_date_critical'],
+ }
+
+ def get_info(self):
+ result = {
+ 'changed': False,
+ 'last_update': None,
+ 'next_update': None,
+ 'digest': None,
+ 'issuer_ordered': None,
+ 'issuer': None,
+ 'revoked_certificates': [],
+ }
+
+ result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
+ result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
+ try:
+ result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
+ except AttributeError:
+ # Older cryptography versions don't have signature_algorithm_oid yet
+ dotted = crypto_utils._obj2txt(
+ self.crl._backend._lib,
+ self.crl._backend._ffi,
+ self.crl._x509_crl.sig_alg.algorithm
+ )
+ oid = x509.oid.ObjectIdentifier(dotted)
+ result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
+ issuer = []
+ for attribute in self.crl.issuer:
+ issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
+ result['issuer_ordered'] = issuer
+ result['issuer'] = {}
+ for k, v in issuer:
+ result['issuer'][k] = v
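+ # A dict keeps one value per key, so repeated issuer components collapse to
+ # the last occurrence (as documented for the C(issuer) return value).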
+ result['revoked_certificates'] = []
+ for cert in self.crl:
+ entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
+ result['revoked_certificates'].append(self._dump_revoked(entry))
+
+ return result
+
+ def generate(self):
+ # Empty method because crypto_utils.OpenSSLObject wants this
+ pass
+
+ def dump(self):
+ # Empty method because crypto_utils.OpenSSLObject wants this
+ pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path'),
+ content=dict(type='str'),
+ ),
+ required_one_of=(
+ ['path', 'content'],
+ ),
+ mutually_exclusive=(
+ ['path', 'content'],
+ ),
+ supports_check_mode=True,
+ )
+
+ if not CRYPTOGRAPHY_FOUND:
+ module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
+ exception=CRYPTOGRAPHY_IMP_ERR)
+
+ try:
+ crl = CRLInfo(module)
+ result = crl.get_info()
+ module.exit_json(**result)
+ except crypto_utils.OpenSSLObjectError as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/xml.py b/test/support/integration/plugins/modules/xml.py
new file mode 100644
index 0000000000..c71b3c1778
--- /dev/null
+++ b/test/support/integration/plugins/modules/xml.py
@@ -0,0 +1,965 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Red Hat, Inc.
+# Copyright: (c) 2014, Tim Bielawa <tbielawa@redhat.com>
+# Copyright: (c) 2014, Magnus Hedemark <mhedemar@redhat.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: xml
+short_description: Manage bits and pieces of XML files or strings
+description:
+- A CRUD-like interface to managing bits of XML files.
+version_added: '2.4'
+options:
+ path:
+ description:
+ - Path to the file to operate on.
+ - This file must exist ahead of time.
+ - This parameter is required, unless C(xmlstring) is given.
+ type: path
+ aliases: [ dest, file ]
+ xmlstring:
+ description:
+ - A string containing XML on which to operate.
+ - This parameter is required, unless C(path) is given.
+ type: str
+ xpath:
+ description:
+ - A valid XPath expression describing the item(s) you want to manipulate.
+ - Operates on the document root, C(/), by default.
+ type: str
+ namespaces:
+ description:
+ - The namespace C(prefix:uri) mapping for the XPath expression.
+ - Needs to be a C(dict), not a C(list) of items.
+ type: dict
+ state:
+ description:
+ - Set or remove an xpath selection (node(s), attribute(s)).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ aliases: [ ensure ]
+ attribute:
+ description:
+ - The attribute to select when using parameter C(value).
+ - This is a string, not prepended with C(@).
+ type: raw
+ value:
+ description:
+ - Desired state of the selected attribute.
+ - Either a string, or to unset a value, the Python C(None) keyword (YAML equivalent: C(null)).
+ - Elements default to no value (but present).
+ - Attributes default to an empty string.
+ type: raw
+ add_children:
+ description:
+ - Add additional child-element(s) to a selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: list
+ set_children:
+ description:
+ - Set the child-element(s) of a selected element for a given C(xpath).
+ - Removes any existing children.
+ - Child elements must be specified as in C(add_children).
+ - This parameter requires C(xpath) to be set.
+ type: list
+ count:
+ description:
+ - Search for a given C(xpath) and provide the count of any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ print_match:
+ description:
+ - Search for a given C(xpath) and print out any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ pretty_print:
+ description:
+ - Pretty print XML output.
+ type: bool
+ default: no
+ content:
+ description:
+ - Search for a given C(xpath) and get content.
+ - This parameter requires C(xpath) to be set.
+ type: str
+ choices: [ attribute, text ]
+ input_type:
+ description:
+ - Type of input for C(add_children) and C(set_children).
+ type: str
+ choices: [ xml, yaml ]
+ default: yaml
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ strip_cdata_tags:
+ description:
+ - Remove CDATA tags surrounding text values.
+ - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+ type: bool
+ default: no
+ version_added: '2.7'
+ insertbefore:
+ description:
+ - Add additional child-element(s) before the first selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ version_added: '2.8'
+ insertafter:
+ description:
+ - Add additional child-element(s) after the last selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ version_added: '2.8'
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so it may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that if your XML elements are namespaced, you need to use the C(namespaces) parameter; see the examples.
+- A namespace prefix should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+ description: More information related to the development of this xml module.
+ link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+ description: A brief tutorial on XPath (w3schools.com).
+ link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+ description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+# <name>Tasty Beverage Co.</name>
+# <beers>
+# <beer>Rochefort 10</beer>
+# <beer>St. Bernardus Abbot 12</beer>
+# <beer>Schlitz</beer>
+# </beers>
+# <rating subjective="true">10</rating>
+# <website>
+# <mobilefriendly/>
+# <address>http://tastybeverageco.com</address>
+# </website>
+# </business>
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/rating/@subjective
+ state: absent
+
+- name: Set the rating to '11'
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/rating
+ value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: hits
+
+- debug:
+ var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+ xml:
+ path: /foo/bar.xml
+ xpath: '/business/beers/beer[text()="Rochefort 10"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ attribute: validatedon
+ value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ content: attribute
+ register: xmlresp
+
+- name: Show an attribute value
+ debug:
+ var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/*
+ state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website
+ set_children: []
+
+# In case of namespaces, like in the XML below, they have to be explicitly stated.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+# <bar>
+# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+# </bar>
+# </foo>
+
+# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+ xml:
+ path: foo.xml
+ xpath: /x:foo/x:bar/y:baz
+ namespaces:
+ x: http://x.test
+ y: http://y.test
+ z: http://z.test
+ attribute: z:my_namespaced_attribute
+ value: 'false'
+'''
+
+RETURN = r'''
+actions:
+ description: A dictionary with the original xpath, namespaces and state.
+ type: dict
+ returned: success
+ sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+ description: The name of the backup file that was created.
+ type: str
+ returned: when backup=yes
+ sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+ description: The count of xpath matches.
+ type: int
+ returned: when parameter 'count' is set
+ sample: 2
+matches:
+ description: The xpath matches found.
+ type: list
+ returned: when parameter 'print_match' is set
+msg:
+ description: A message related to the performed action(s).
+ type: str
+ returned: always
+xmlstring:
+ description: An XML string of the resulting output.
+ type: str
+ returned: when parameter 'xmlstring' is set
+'''
+
+import copy
+import json
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+from io import BytesIO
+
+LXML_IMP_ERR = None
+try:
+ from lxml import etree, objectify
+ HAS_LXML = True
+except ImportError:
+ LXML_IMP_ERR = traceback.format_exc()
+ HAS_LXML = False
+
+from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+# Note: we can't reasonably support the XPath trick of concatenating strings
+# wrapped in the other delimiter when both ' and " are needed in one string, especially not as simple XPath.
+_XPSTR = "('(?:.*)'|\"(?:.*)\")"
+
+_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
+_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
+_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
+_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
+_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
+_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
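+# Illustrative group captures for the simple-XPath patterns above (assumed examples):
+#   _RE_SPLITSIMPLELAST          '/foo/bar/baz'          -> ('/foo/bar', 'baz')
+#   _RE_SPLITSIMPLEATTRLAST      '/foo/bar/@attr'        -> ('/foo/bar', '@attr')
+#   _RE_SPLITSIMPLELASTEQVALUE   "/foo/bar/text()='x'"   -> ('/foo', 'bar', "'x'")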
+
+
+def has_changed(doc):
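+ # Compares a re-serialized form of the document against the module-level
+ # 'orig_doc' snapshot taken in main() (declared global there).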
+ orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
+ obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
+ return (orig_obj != obj)
+
+
+def do_print_match(module, tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ match_xpaths = []
+ for m in match:
+ match_xpaths.append(tree.getpath(m))
+ match_str = json.dumps(match_xpaths)
+ msg = "selector '%s' match: %s" % (xpath, match_str)
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg)
+
+
+def count_nodes(module, tree, xpath, namespaces):
+ """ Return the count of nodes matching the xpath """
+ hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
+ msg = "found %d nodes" % hits
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
+
+
+def is_node(tree, xpath, namespaces):
+ """ Test if a given xpath matches anything and if that match is a node.
+
+ For now we just assume you're only searching for one specific thing."""
+ if xpath_matches(tree, xpath, namespaces):
+ # OK, it found something
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._Element):
+ return True
+
+ return False
+
+
+def is_attribute(tree, xpath, namespaces):
+ """ Test if a given xpath matches and that match is an attribute
+
+ An xpath attribute search will only match one item"""
+ if xpath_matches(tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._ElementStringResult):
+ return True
+ elif isinstance(match[0], etree._ElementUnicodeResult):
+ return True
+ return False
+
+
+def xpath_matches(tree, xpath, namespaces):
+ """ Test if a node exists """
+ if tree.xpath(xpath, namespaces=namespaces):
+ return True
+ return False
+
+
+def delete_xpath_target(module, tree, xpath, namespaces):
+ """ Delete an attribute or element from a tree """
+ try:
+ for result in tree.xpath(xpath, namespaces=namespaces):
+ # Get the xpath for this result
+ if is_attribute(tree, xpath, namespaces):
+ # Delete an attribute
+ parent = result.getparent()
+ # Pop this attribute match out of the parent
+ # node's 'attrib' dict by using this match's
+ # 'attrname' attribute for the key
+ parent.attrib.pop(result.attrname)
+ elif is_node(tree, xpath, namespaces):
+ # Delete an element
+ result.getparent().remove(result)
+ else:
+ raise Exception("Impossible error")
+ except Exception as e:
+ module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
+ else:
+ finish(module, tree, xpath, namespaces, changed=True)
+
+
+def replace_children_of(children, match):
+ for element in list(match):
+ match.remove(element)
+ match.extend(children)
+
+
+def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
+ matches = tree.xpath(xpath, namespaces=namespaces)
+
+ # Create a list of our new children
+ children = children_to_nodes(module, children, in_type)
+ children_as_string = [etree.tostring(c) for c in children]
+
+ changed = False
+
+ # XPath queries always return matches as a list, so...
+ for match in matches:
+ # Check if elements differ
+ if len(list(match)) == len(children):
+ for idx, element in enumerate(list(match)):
+ if etree.tostring(element) != children_as_string[idx]:
+ replace_children_of(children, match)
+ changed = True
+ break
+ else:
+ replace_children_of(children, match)
+ changed = True
+
+ return changed
+
+
+def set_target_children(module, tree, xpath, namespaces, children, in_type):
+ changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
+ # Write it out
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter):
+ if is_node(tree, xpath, namespaces):
+ new_kids = children_to_nodes(module, children, in_type)
+ if insertbefore or insertafter:
+ insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter)
+ else:
+ for node in tree.xpath(xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ finish(module, tree, xpath, namespaces, changed=True)
+ else:
+ finish(module, tree, xpath, namespaces)
+
+
+def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter):
+ """
+ Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the
+ first xpath hit, with insertafter, it is inserted after the last xpath hit.
+ """
+ insert_target = tree.xpath(xpath, namespaces=namespaces)
+ loc_index = 0 if insertbefore else -1
+ index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index])
+ parent = insert_target[0].getparent()
+ if insertafter:
+ index_in_parent += 1
+ for child in children:
+ parent.insert(index_in_parent, child)
+ index_in_parent += 1
+
+
+def _extract_xpstr(g):
+ return g[1:-1]
+
+
+def split_xpath_last(xpath):
+ """split an XPath of the form /foo/bar/baz into /foo/bar and baz"""
+ xpath = xpath.strip()
+ m = _RE_SPLITSIMPLELAST.match(xpath)
+ if m:
+ # requesting an element to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
+ if m:
+ # requesting an element to exist with an inner text
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
+ if m:
+ # requesting an attribute to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
+ if m:
+ # requesting an attribute to exist with a value
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSUBLAST.match(xpath)
+ if m:
+ content = [x.strip() for x in m.group(3).split(" and ")]
+ return (m.group(1), [('/' + m.group(2), content)])
+
+ m = _RE_SPLITONLYEQVALUE.match(xpath)
+ if m:
+ # requesting a change of inner text
+ return (m.group(1), [("", _extract_xpstr(m.group(2)))])
+ return (xpath, [])
+
+
+def nsnameToClark(name, namespaces):
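+ # e.g. nsnameToClark('x:bar', {'x': 'http://x.test'}) -> '{http://x.test}bar'
+ # (Clark notation, as used by lxml for namespaced tag and attribute names)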
+ if ":" in name:
+ (nsname, rawname) = name.split(":")
+ # return "{{%s}}%s" % (namespaces[nsname], rawname)
+ return "{{{0}}}{1}".format(namespaces[nsname], rawname)
+
+ # no namespace name here
+ return name
+
+
+def check_or_make_target(module, tree, xpath, namespaces):
+ (inner_xpath, changes) = split_xpath_last(xpath)
+ if (inner_xpath == xpath) or (changes is None):
+ module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+ return False
+
+ changed = False
+
+ if not is_node(tree, inner_xpath, namespaces):
+ changed = check_or_make_target(module, tree, inner_xpath, namespaces)
+
+ # we test again after calling check_or_make_target
+ if is_node(tree, inner_xpath, namespaces) and changes:
+ for (eoa, eoa_value) in changes:
+ if eoa and eoa[0] != '@' and eoa[0] != '/':
+ # implicitly creating an element
+ new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
+ if eoa_value:
+ for nk in new_kids:
+ nk.text = eoa_value
+
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ changed = True
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa and eoa[0] == '/':
+ element = eoa[1:]
+ new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ for nk in new_kids:
+ for subexpr in eoa_value:
+ # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
+ # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
+ check_or_make_target(module, nk, "./" + subexpr, namespaces)
+ changed = True
+
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa == "":
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ if (node.text != eoa_value):
+ node.text = eoa_value
+ changed = True
+
+ elif eoa and eoa[0] == '@':
+ attribute = nsnameToClark(eoa[1:], namespaces)
+
+ for element in tree.xpath(inner_xpath, namespaces=namespaces):
+ changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
+
+ if changing:
+ changed = changed or changing
+ if eoa_value is None:
+ value = ""
+ else:
+ value = eoa_value
+ element.attrib[attribute] = value
+
+ # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
+ # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
+
+ else:
+ module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
+
+ return changed
+
+
+def ensure_xpath_exists(module, tree, xpath, namespaces):
+ changed = False
+
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def set_target_inner(module, tree, xpath, namespaces, attribute, value):
+ changed = False
+
+ try:
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+ except Exception as e:
+ missing_namespace = ""
+ # NOTE: This checks only the namespaces defined in root element!
+ # TODO: Implement a more robust check to check for child namespaces' existence
+ if tree.getroot().nsmap and ":" not in xpath:
+ missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
+ module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
+ (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
+
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ if not attribute:
+ changed = changed or (element.text != value)
+ if element.text != value:
+ element.text = value
+ else:
+ changed = changed or (element.get(attribute) != value)
+ if ":" in attribute:
+ attr_ns, attr_name = attribute.split(":")
+ # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
+ attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
+ if element.get(attribute) != value:
+ element.set(attribute, value)
+
+ return changed
+
+
+def set_target(module, tree, xpath, namespaces, attribute, value):
+ changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def get_element_text(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ elements.append({element.tag: element.text})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def get_element_attr(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ child = {}
+ for key in element.keys():
+ value = element.get(key)
+ child.update({key: value})
+ elements.append({element.tag: child})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def child_to_element(module, child, in_type):
+ if in_type == 'xml':
+ infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
+
+ try:
+ parser = etree.XMLParser()
+ node = etree.parse(infile, parser)
+ return node.getroot()
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing child element: %s" % e)
+ elif in_type == 'yaml':
+ if isinstance(child, string_types):
+ return etree.Element(child)
+ elif isinstance(child, MutableMapping):
+ if len(child) > 1:
+ module.fail_json(msg="Can only create children from hashes with one key")
+
+ (key, value) = next(iteritems(child))
+ if isinstance(value, MutableMapping):
+ children = value.pop('_', None)
+
+ node = etree.Element(key, value)
+
+ if children is not None:
+ if not isinstance(children, list):
+ module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
+
+ subnodes = children_to_nodes(module, children)
+ node.extend(subnodes)
+ else:
+ node = etree.Element(key)
+ node.text = value
+ return node
+ else:
+ module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
+ else:
+ module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
+
+
+def children_to_nodes(module=None, children=None, type='yaml'):
+ """turn a str/hash/list of str&hash into a list of elements"""
+ children = [] if children is None else children
+
+ return [child_to_element(module, child, type) for child in children]
+
+
+def make_pretty(module, tree):
+ xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ result = dict(
+ changed=False,
+ )
+
+ if module.params['path']:
+ xml_file = module.params['path']
+ with open(xml_file, 'rb') as xml_content:
+ if xml_string != xml_content.read():
+ result['changed'] = True
+ if not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ elif module.params['xmlstring']:
+ result['xmlstring'] = xml_string
+ # NOTE: Modifying a string is not considered a change!
+ if xml_string != module.params['xmlstring']:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
+
+ result = dict(
+ actions=dict(
+ xpath=xpath,
+ namespaces=namespaces,
+ state=module.params['state']
+ ),
+ changed=has_changed(tree),
+ )
+
+ if module.params['count'] or hitcount:
+ result['count'] = hitcount
+
+ if module.params['print_match'] or matches:
+ result['matches'] = matches
+
+ if msg:
+ result['msg'] = msg
+
+ if result['changed']:
+ if module._diff:
+ result['diff'] = dict(
+ before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ )
+
+ if module.params['path'] and not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ if module.params['xmlstring']:
+ result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', aliases=['dest', 'file']),
+ xmlstring=dict(type='str'),
+ xpath=dict(type='str'),
+ namespaces=dict(type='dict', default={}),
+ state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
+ value=dict(type='raw'),
+ attribute=dict(type='raw'),
+ add_children=dict(type='list'),
+ set_children=dict(type='list'),
+ count=dict(type='bool', default=False),
+ print_match=dict(type='bool', default=False),
+ pretty_print=dict(type='bool', default=False),
+ content=dict(type='str', choices=['attribute', 'text']),
+ input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
+ backup=dict(type='bool', default=False),
+ strip_cdata_tags=dict(type='bool', default=False),
+ insertbefore=dict(type='bool', default=False),
+ insertafter=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_by=dict(
+ add_children=['xpath'],
+ # TODO: Reinstate this in Ansible v2.12 when we have deprecated the incorrect use below
+ # attribute=['value'],
+ content=['xpath'],
+ set_children=['xpath'],
+ value=['xpath'],
+ ),
+ required_if=[
+ ['count', True, ['xpath']],
+ ['print_match', True, ['xpath']],
+ ['insertbefore', True, ['xpath']],
+ ['insertafter', True, ['xpath']],
+ ],
+ required_one_of=[
+ ['path', 'xmlstring'],
+ ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
+ ],
+ mutually_exclusive=[
+ ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
+ ['path', 'xmlstring'],
+ ['insertbefore', 'insertafter'],
+ ],
+ )
+
+ xml_file = module.params['path']
+ xml_string = module.params['xmlstring']
+ xpath = module.params['xpath']
+ namespaces = module.params['namespaces']
+ state = module.params['state']
+ value = json_dict_bytes_to_unicode(module.params['value'])
+ attribute = module.params['attribute']
+ set_children = json_dict_bytes_to_unicode(module.params['set_children'])
+ add_children = json_dict_bytes_to_unicode(module.params['add_children'])
+ pretty_print = module.params['pretty_print']
+ content = module.params['content']
+ input_type = module.params['input_type']
+ print_match = module.params['print_match']
+ count = module.params['count']
+ backup = module.params['backup']
+ strip_cdata_tags = module.params['strip_cdata_tags']
+ insertbefore = module.params['insertbefore']
+ insertafter = module.params['insertafter']
+
+ # Check if we have lxml 2.3.0 or newer installed
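+ # etree.LXML_VERSION is a tuple of ints (e.g. (4, 5, 0, 0)); joining it into
+ # a dotted string lets LooseVersion compare it against the minimum version.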
+ if not HAS_LXML:
+ module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR)
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
+ module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
+ module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
+
+ # Report wrongly used attribute parameter when using content=attribute
+ # TODO: Remove this in Ansible v2.12 (and reinstate strict parameter test above) and remove the integration test example
+ if content == 'attribute' and attribute is not None:
+ module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry." % attribute, '2.12')
+
+ # Check if the file exists
+ if xml_string:
+ infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
+ elif os.path.isfile(xml_file):
+ infile = open(xml_file, 'rb')
+ else:
+ module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+
+ # Parse and evaluate xpath expression
+ if xpath is not None:
+ try:
+ etree.XPath(xpath)
+ except etree.XPathSyntaxError as e:
+ module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+ except etree.XPathEvalError as e:
+ module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+
+ # Try to parse in the target XML file
+ try:
+ parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
+ doc = etree.parse(infile, parser)
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+
+ # Ensure we have the original copy to compare
+ global orig_doc
+ orig_doc = copy.deepcopy(doc)
+
+ if print_match:
+ do_print_match(module, doc, xpath, namespaces)
+
+ if count:
+ count_nodes(module, doc, xpath, namespaces)
+
+ if content == 'attribute':
+ get_element_attr(module, doc, xpath, namespaces)
+ elif content == 'text':
+ get_element_text(module, doc, xpath, namespaces)
+
+ # File exists:
+ if state == 'absent':
+ # - absent: delete xpath target
+ delete_xpath_target(module, doc, xpath, namespaces)
+
+ # - present: carry on
+
+ # children && value both set?: should have already aborted by now
+ # add_children && set_children both set?: should have already aborted by now
+
+ # set_children set?
+ if set_children:
+ set_target_children(module, doc, xpath, namespaces, set_children, input_type)
+
+ # add_children set?
+ if add_children:
+ add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter)
+
+ # No?: Carry on
+
+ # Is the xpath target an attribute selector?
+ if value is not None:
+ set_target(module, doc, xpath, namespaces, attribute, value)
+
+ # If an xpath was provided, we need to do something with the data
+ if xpath is not None:
+ ensure_xpath_exists(module, doc, xpath, namespaces)
+
+ # Otherwise only reformat the xml data?
+ if pretty_print:
+ make_pretty(module, doc)
+
+ module.fail_json(msg="Don't know what to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/zabbix_host.py b/test/support/integration/plugins/modules/zabbix_host.py
new file mode 100644
index 0000000000..a2954a1fe0
--- /dev/null
+++ b/test/support/integration/plugins/modules/zabbix_host.py
@@ -0,0 +1,1075 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013-2014, Epic Games, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: zabbix_host
+short_description: Create/update/delete Zabbix hosts
+description:
+ - This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
+version_added: "2.0"
+author:
+ - "Cove (@cove)"
+ - Tony Minfei Ding (!UNKNOWN)
+ - Harrison Gu (@harrisongu)
+ - Werner Dijkerman (@dj-wasabi)
+ - Eike Frost (@eikef)
+requirements:
+ - "python >= 2.6"
+ - "zabbix-api >= 0.5.4"
+options:
+ host_name:
+ description:
+ - Name of the host in Zabbix.
+ - I(host_name) is the unique identifier used and cannot be updated using this module.
+ required: true
+ type: str
+ visible_name:
+ description:
+ - Visible name of the host in Zabbix.
+ version_added: '2.3'
+ type: str
+ description:
+ description:
+ - Description of the host in Zabbix.
+ version_added: '2.5'
+ type: str
+ host_groups:
+ description:
+ - List of host groups the host is part of.
+ type: list
+ elements: str
+ link_templates:
+ description:
+ - List of templates linked to the host.
+ type: list
+ elements: str
+ inventory_mode:
+ description:
+ - Configure the inventory mode.
+ choices: ['automatic', 'manual', 'disabled']
+ version_added: '2.1'
+ type: str
+ inventory_zabbix:
+ description:
+ - Add facts for a Zabbix inventory (e.g. tag) (see example below).
+ - Please review the host inventory documentation for more information on the supported properties
+ - U(https://www.zabbix.com/documentation/3.2/manual/api/reference/host/object#host_inventory)
+ version_added: '2.5'
+ type: dict
+ status:
+ description:
+ - Monitoring status of the host.
+ choices: ['enabled', 'disabled']
+ default: 'enabled'
+ type: str
+ state:
+ description:
+ - State of the host.
+ - On C(present), the host will be created if it does not exist, or updated if the associated data differs.
+ - On C(absent), the host will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ proxy:
+ description:
+ - The name of the Zabbix proxy to be used.
+ type: str
+ interfaces:
+ type: list
+ elements: dict
+ description:
+ - List of interfaces to be created for the host (see example below).
+ - For more information, review host interface documentation at
+ - U(https://www.zabbix.com/documentation/4.0/manual/api/reference/hostinterface/object)
+ suboptions:
+ type:
+ description:
+ - Interface type to add
+ - Numerical values are also accepted for interface type
+ - 1 = agent
+ - 2 = snmp
+ - 3 = ipmi
+ - 4 = jmx
+ choices: ['agent', 'snmp', 'ipmi', 'jmx']
+ required: true
+ main:
+ type: int
+ description:
+ - Whether the interface is used as default.
+ - If multiple interfaces with the same type are provided, only one can be default.
+ - 0 (not default), 1 (default)
+ default: 0
+ choices: [0, 1]
+ useip:
+ type: int
+ description:
+ - Connect to host interface with IP address instead of DNS name.
+ - 0 (don't use ip), 1 (use ip)
+ default: 0
+ choices: [0, 1]
+ ip:
+ type: str
+ description:
+ - IP address used by host interface.
+ - Required if I(useip=1).
+ default: ''
+ dns:
+ type: str
+ description:
+ - DNS name of the host interface.
+ - Required if I(useip=0).
+ default: ''
+ port:
+ type: str
+ description:
+ - Port used by host interface.
+ - If not specified, default port for each type of interface is used
+ - 10050 if I(type='agent')
+ - 161 if I(type='snmp')
+ - 623 if I(type='ipmi')
+ - 12345 if I(type='jmx')
+ bulk:
+ type: int
+ description:
+ - Whether to use bulk SNMP requests.
+ - 0 (don't use bulk requests), 1 (use bulk requests)
+ choices: [0, 1]
+ default: 1
+ default: []
+ tls_connect:
+ description:
+ - Specifies what encryption to use for outgoing connections.
+ - Possible values: 1 (no encryption), 2 (PSK), 4 (certificate).
+ - Works only with >= Zabbix 3.0
+ default: 1
+ version_added: '2.5'
+ type: int
+ tls_accept:
+ description:
+ - Specifies what types of connections are allowed for incoming connections.
+ - The tls_accept parameter accepts values from 1 to 7.
+ - Possible values: 1 (no encryption), 2 (PSK), 4 (certificate).
+ - Values can be combined.
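+ - For example, C(5) (i.e. 1+4) allows both unencrypted and certificate-based incoming connections.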
+ - Works only with >= Zabbix 3.0
+ default: 1
+ version_added: '2.5'
+ type: int
+ tls_psk_identity:
+ description:
+ - It is a unique name by which this specific PSK is referred to by Zabbix components.
+ - Do not put sensitive information in the PSK identity string; it is transmitted over the network unencrypted.
+ - Works only with >= Zabbix 3.0
+ version_added: '2.5'
+ type: str
+ tls_psk:
+ description:
+ - The PSK value is a hard-to-guess string of hexadecimal digits.
+ - The preshared key, at least 32 hex digits. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
+ - Works only with >= Zabbix 3.0
+ version_added: '2.5'
+ type: str
+ ca_cert:
+ description:
+ - Required certificate issuer.
+ - Works only with >= Zabbix 3.0
+ version_added: '2.5'
+ aliases: [ tls_issuer ]
+ type: str
+ tls_subject:
+ description:
+ - Required certificate subject.
+ - Works only with >= Zabbix 3.0
+ version_added: '2.5'
+ type: str
+ ipmi_authtype:
+ description:
+ - IPMI authentication algorithm.
+ - Please review the Host object documentation for more information on the supported properties
+ - 'https://www.zabbix.com/documentation/3.4/manual/api/reference/host/object'
+ - Possible values are C(0) (none), C(1) (MD2), C(2) (MD5), C(4) (straight), C(5) (OEM), C(6) (RMCP+),
+ with -1 being the API default.
+ - Please note that the Zabbix API will treat absent settings as default when updating
+ any of the I(ipmi_)-options; this means that if you attempt to set any of the four
+ options individually, the rest will be reset to default values.
+ version_added: '2.5'
+ type: int
+ ipmi_privilege:
+ description:
+ - IPMI privilege level.
+ - Please review the Host object documentation for more information on the supported properties
+ - 'https://www.zabbix.com/documentation/3.4/manual/api/reference/host/object'
+ - Possible values are C(1) (callback), C(2) (user), C(3) (operator), C(4) (admin), C(5) (OEM), with C(2)
+ being the API default.
+ - Also see the last note in the I(ipmi_authtype) documentation.
+ version_added: '2.5'
+ type: int
+ ipmi_username:
+ description:
+ - IPMI username.
+ - Also see the last note in the I(ipmi_authtype) documentation.
+ version_added: '2.5'
+ type: str
+ ipmi_password:
+ description:
+ - IPMI password.
+ - Also see the last note in the I(ipmi_authtype) documentation.
+ version_added: '2.5'
+ type: str
+ force:
+ description:
+ - Overwrite the host configuration, even if already present.
+ type: bool
+ default: 'yes'
+ version_added: '2.0'
+ macros:
+ description:
+ - List of user macros to assign to the Zabbix host.
+ - Providing I(macros=[]) with I(force=yes) will clean all of the existing user macros from the host.
+ type: list
+ elements: dict
+ version_added: '2.10'
+ suboptions:
+ macro:
+ description:
+ - Name of the user macro.
+ - Can be in Zabbix native format "{$MACRO}" or short format "MACRO".
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the user macro.
+ type: str
+ required: true
+ description:
+ description:
+ - Description of the user macro.
+ - Works only with >= Zabbix 4.4.
+ type: str
+ required: false
+ default: ''
+ aliases: [ user_macros ]
+ tags:
+ description:
+ - List of host tags to assign to the Zabbix host.
+ - Works only with >= Zabbix 4.2.
+ - Providing I(tags=[]) with I(force=yes) will clean all of the tags from the host.
+ type: list
+ elements: dict
+ version_added: '2.10'
+ suboptions:
+ tag:
+ description:
+ - Name of the host tag.
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the host tag.
+ type: str
+ default: ''
+ aliases: [ host_tags ]
+
+extends_documentation_fragment:
+ - zabbix
+'''
+
+EXAMPLES = r'''
+- name: Create a new host or update an existing host's info
+ local_action:
+ module: zabbix_host
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ host_name: ExampleHost
+ visible_name: ExampleName
+ description: My ExampleHost Description
+ host_groups:
+ - Example group1
+ - Example group2
+ link_templates:
+ - Example template1
+ - Example template2
+ status: enabled
+ state: present
+ inventory_mode: manual
+ inventory_zabbix:
+ tag: "{{ your_tag }}"
+ alias: "{{ your_alias }}"
+ notes: "Special Informations: {{ your_informations | default('None') }}"
+ location: "{{ your_location }}"
+ site_rack: "{{ your_site_rack }}"
+ os: "{{ your_os }}"
+ hardware: "{{ your_hardware }}"
+ ipmi_authtype: 2
+ ipmi_privilege: 4
+ ipmi_username: username
+ ipmi_password: password
+ interfaces:
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 10.xx.xx.xx
+ dns: ""
+ port: "10050"
+ - type: 4
+ main: 1
+ useip: 1
+ ip: 10.xx.xx.xx
+ dns: ""
+ port: "12345"
+ proxy: a.zabbix.proxy
+ macros:
+ - macro: '{$EXAMPLEMACRO}'
+ value: ExampleMacroValue
+ - macro: EXAMPLEMACRO2
+ value: ExampleMacroValue2
+ description: Example desc that work only with Zabbix 4.4 and higher
+ tags:
+ - tag: ExampleHostsTag
+ - tag: ExampleHostsTag2
+ value: ExampleTagValue
+
+- name: Update an existing host's TLS settings
+ local_action:
+ module: zabbix_host
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ host_name: ExampleHost
+ visible_name: ExampleName
+ host_groups:
+ - Example group1
+ tls_psk_identity: test
+ tls_connect: 2
+ tls_psk: 123456789abcdef123456789abcdef12
+'''
+
+
+import atexit
+import copy
+import traceback
+
+try:
+ from zabbix_api import ZabbixAPI
+ HAS_ZABBIX_API = True
+except ImportError:
+ ZBX_IMP_ERR = traceback.format_exc()
+ HAS_ZABBIX_API = False
+
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class Host(object):
+ def __init__(self, module, zbx):
+ self._module = module
+ self._zapi = zbx
+ self._zbx_api_version = zbx.api_version()[:5]
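+ # api_version() returns a version string such as '4.4.0'; only the first
+ # five characters are kept for the version comparisons below.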
+
+ # check if host exists
+ def is_host_exist(self, host_name):
+ result = self._zapi.host.get({'filter': {'host': host_name}})
+ return result
+
+ # check if host group exists
+ def check_host_group_exist(self, group_names):
+ for group_name in group_names:
+ result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
+ if not result:
+ self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
+ return True
+
+ def get_template_ids(self, template_list):
+ template_ids = []
+ if template_list is None or len(template_list) == 0:
+ return template_ids
+ for template in template_list:
+ matched_templates = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
+ if len(matched_templates) < 1:
+ self._module.fail_json(msg="Template not found: %s" % template)
+ else:
+ template_id = matched_templates[0]['templateid']
+ template_ids.append(template_id)
+ return template_ids
+
+ def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name, description, tls_connect,
+ tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege,
+ ipmi_username, ipmi_password, macros, tags):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status,
+ 'tls_connect': tls_connect, 'tls_accept': tls_accept}
+ if proxy_id:
+ parameters['proxy_hostid'] = proxy_id
+ if visible_name:
+ parameters['name'] = visible_name
+ if tls_psk_identity is not None:
+ parameters['tls_psk_identity'] = tls_psk_identity
+ if tls_psk is not None:
+ parameters['tls_psk'] = tls_psk
+ if tls_issuer is not None:
+ parameters['tls_issuer'] = tls_issuer
+ if tls_subject is not None:
+ parameters['tls_subject'] = tls_subject
+ if description:
+ parameters['description'] = description
+ if ipmi_authtype is not None:
+ parameters['ipmi_authtype'] = ipmi_authtype
+ if ipmi_privilege is not None:
+ parameters['ipmi_privilege'] = ipmi_privilege
+ if ipmi_username is not None:
+ parameters['ipmi_username'] = ipmi_username
+ if ipmi_password is not None:
+ parameters['ipmi_password'] = ipmi_password
+ if macros is not None:
+ parameters['macros'] = macros
+ if tags is not None:
+ parameters['tags'] = tags
+
+ host_list = self._zapi.host.create(parameters)
+ if len(host_list) >= 1:
+ return host_list['hostids'][0]
+ except Exception as e:
+ self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
+
+ def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id,
+ visible_name, description, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer,
+ tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password, macros, tags):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'tls_connect': tls_connect,
+ 'tls_accept': tls_accept}
+ if proxy_id >= 0:
+ parameters['proxy_hostid'] = proxy_id
+ if visible_name:
+ parameters['name'] = visible_name
+ if tls_psk_identity:
+ parameters['tls_psk_identity'] = tls_psk_identity
+ if tls_psk:
+ parameters['tls_psk'] = tls_psk
+ if tls_issuer:
+ parameters['tls_issuer'] = tls_issuer
+ if tls_subject:
+ parameters['tls_subject'] = tls_subject
+ if description:
+ parameters['description'] = description
+ if ipmi_authtype:
+ parameters['ipmi_authtype'] = ipmi_authtype
+ if ipmi_privilege:
+ parameters['ipmi_privilege'] = ipmi_privilege
+ if ipmi_username:
+ parameters['ipmi_username'] = ipmi_username
+ if ipmi_password:
+ parameters['ipmi_password'] = ipmi_password
+ if macros is not None:
+ parameters['macros'] = macros
+ if tags is not None:
+ parameters['tags'] = tags
+
+ self._zapi.host.update(parameters)
+ interface_list_copy = exist_interface_list  # NOTE: an alias, not a copy; matched entries are removed below so they cannot match twice
+ if interfaces:
+ for interface in interfaces:
+ flag = False
+ interface_str = interface
+ for exist_interface in exist_interface_list:
+ interface_type = int(interface['type'])
+ exist_interface_type = int(exist_interface['type'])
+ if interface_type == exist_interface_type:
+ # update
+ interface_str['interfaceid'] = exist_interface['interfaceid']
+ self._zapi.hostinterface.update(interface_str)
+ flag = True
+ interface_list_copy.remove(exist_interface)
+ break
+ if not flag:
+ # add
+ interface_str['hostid'] = host_id
+ self._zapi.hostinterface.create(interface_str)
+ # remove
+ remove_interface_ids = []
+ for remove_interface in interface_list_copy:
+ interface_id = remove_interface['interfaceid']
+ remove_interface_ids.append(interface_id)
+ if len(remove_interface_ids) > 0:
+ self._zapi.hostinterface.delete(remove_interface_ids)
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
+
+ def delete_host(self, host_id, host_name):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.host.delete([host_id])
+ except Exception as e:
+ self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
+
+ # get host by host name
+ def get_host_by_host_name(self, host_name):
+ params = {
+ 'output': 'extend',
+ 'selectInventory': 'extend',
+ 'selectMacros': 'extend',
+ 'filter': {
+ 'host': [host_name]
+ }
+ }
+
+ if LooseVersion(self._zbx_api_version) >= LooseVersion('4.2.0'):
+ params.update({'selectTags': 'extend'})
+
+ host_list = self._zapi.host.get(params)
+ if len(host_list) < 1:
+ self._module.fail_json(msg="Host not found: %s" % host_name)
+ else:
+ return host_list[0]
+
+ # get proxyid by proxy name
+ def get_proxyid_by_proxy_name(self, proxy_name):
+ proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
+ if len(proxy_list) < 1:
+ self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
+ else:
+ return int(proxy_list[0]['proxyid'])
+
+ # get group ids by group names
+ def get_group_ids_by_group_names(self, group_names):
+ if self.check_host_group_exist(group_names):
+ return self._zapi.hostgroup.get({'output': 'groupid', 'filter': {'name': group_names}})
+
+ # get host groups ids by host id
+ def get_group_ids_by_host_id(self, host_id):
+ return self._zapi.hostgroup.get({'output': 'groupid', 'hostids': host_id})
+
+ # get host templates by host id
+ def get_host_templates_by_host_id(self, host_id):
+ template_ids = []
+ template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
+ for template in template_list:
+ template_ids.append(template['templateid'])
+ return template_ids
+
+ # check whether the existing interfaces match the given interfaces
+ def check_interface_properties(self, exist_interface_list, interfaces):
+ interfaces_port_list = []
+
+ if interfaces is not None:
+ if len(interfaces) >= 1:
+ for interface in interfaces:
+ interfaces_port_list.append(str(interface['port']))
+
+ exist_interface_ports = []
+ if len(exist_interface_list) >= 1:
+ for exist_interface in exist_interface_list:
+ exist_interface_ports.append(str(exist_interface['port']))
+
+ if set(interfaces_port_list) != set(exist_interface_ports):
+ return True
+
+ for exist_interface in exist_interface_list:
+ exist_interface_port = str(exist_interface['port'])
+ for interface in interfaces:
+ interface_port = str(interface['port'])
+ if interface_port == exist_interface_port:
+ for key in interface.keys():
+ if str(exist_interface[key]) != str(interface[key]):
+ return True
+
+ return False
+
+ # get the status of host by host
+ def get_host_status_by_host(self, host):
+ return host['status']
+
+ # check all the properties before link or clear template
+ def check_all_properties(self, host_id, group_ids, status, interfaces, template_ids,
+ exist_interfaces, host, proxy_id, visible_name, description, host_name,
+ inventory_mode, inventory_zabbix, tls_accept, tls_psk_identity, tls_psk,
+ tls_issuer, tls_subject, tls_connect, ipmi_authtype, ipmi_privilege,
+ ipmi_username, ipmi_password, macros, tags):
+ # get the existing host's groups
+ exist_host_groups = sorted(self.get_group_ids_by_host_id(host_id), key=lambda k: k['groupid'])
+ if sorted(group_ids, key=lambda k: k['groupid']) != exist_host_groups:
+ return True
+
+ # get the existing status
+ exist_status = self.get_host_status_by_host(host)
+ if int(status) != int(exist_status):
+ return True
+
+ # check whether the existing interfaces match the given interfaces
+ if self.check_interface_properties(exist_interfaces, interfaces):
+ return True
+
+ # get the existing templates
+ exist_template_ids = self.get_host_templates_by_host_id(host_id)
+ if set(list(template_ids)) != set(exist_template_ids):
+ return True
+
+ if int(host['proxy_hostid']) != int(proxy_id):
+ return True
+
+ # Check whether the visible_name has changed; Zabbix defaults to the technical hostname if not set.
+ if visible_name:
+ if host['name'] != visible_name:
+ return True
+
+ # Only compare description if it is given as a module parameter
+ if description:
+ if host['description'] != description:
+ return True
+
+ if inventory_mode:
+ if LooseVersion(self._zbx_api_version) <= LooseVersion('4.4.0'):
+ if host['inventory']:
+ if int(host['inventory']['inventory_mode']) != self.inventory_mode_numeric(inventory_mode):
+ return True
+ elif inventory_mode != 'disabled':
+ return True
+ else:
+ if int(host['inventory_mode']) != self.inventory_mode_numeric(inventory_mode):
+ return True
+
+ if inventory_zabbix:
+ proposed_inventory = copy.deepcopy(host['inventory'])
+ proposed_inventory.update(inventory_zabbix)
+ if proposed_inventory != host['inventory']:
+ return True
+
+ if tls_accept is not None and 'tls_accept' in host:
+ if int(host['tls_accept']) != tls_accept:
+ return True
+
+ if tls_psk_identity is not None and 'tls_psk_identity' in host:
+ if host['tls_psk_identity'] != tls_psk_identity:
+ return True
+
+ if tls_psk is not None and 'tls_psk' in host:
+ if host['tls_psk'] != tls_psk:
+ return True
+
+ if tls_issuer is not None and 'tls_issuer' in host:
+ if host['tls_issuer'] != tls_issuer:
+ return True
+
+ if tls_subject is not None and 'tls_subject' in host:
+ if host['tls_subject'] != tls_subject:
+ return True
+
+ if tls_connect is not None and 'tls_connect' in host:
+ if int(host['tls_connect']) != tls_connect:
+ return True
+ if ipmi_authtype is not None:
+ if int(host['ipmi_authtype']) != ipmi_authtype:
+ return True
+ if ipmi_privilege is not None:
+ if int(host['ipmi_privilege']) != ipmi_privilege:
+ return True
+ if ipmi_username is not None:
+ if host['ipmi_username'] != ipmi_username:
+ return True
+ if ipmi_password is not None:
+ if host['ipmi_password'] != ipmi_password:
+ return True
+
+ # hostmacroid and hostid are present in every item of host['macros'] and need to be removed
+ if macros is not None and 'macros' in host:
+ existing_macros = sorted(host['macros'], key=lambda k: k['macro'])
+ for macro in existing_macros:
+ macro.pop('hostid', False)
+ macro.pop('hostmacroid', False)
+
+ if sorted(macros, key=lambda k: k['macro']) != existing_macros:
+ return True
+
+ if tags is not None and 'tags' in host:
+ if sorted(tags, key=lambda k: k['tag']) != sorted(host['tags'], key=lambda k: k['tag']):
+ return True
+
+ return False
+
+    # link templates to, or clear templates from, the host
+ def link_or_clear_template(self, host_id, template_id_list, tls_connect, tls_accept, tls_psk_identity, tls_psk,
+ tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password):
+        # get the host's existing template ids
+ exist_template_id_list = self.get_host_templates_by_host_id(host_id)
+
+ exist_template_ids = set(exist_template_id_list)
+ template_ids = set(template_id_list)
+ template_id_list = list(template_ids)
+
+ # get unlink and clear templates
+ templates_clear = exist_template_ids.difference(template_ids)
+ templates_clear_list = list(templates_clear)
+ request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list,
+ 'tls_connect': tls_connect, 'tls_accept': tls_accept, 'ipmi_authtype': ipmi_authtype,
+ 'ipmi_privilege': ipmi_privilege, 'ipmi_username': ipmi_username, 'ipmi_password': ipmi_password}
+ if tls_psk_identity is not None:
+ request_str['tls_psk_identity'] = tls_psk_identity
+ if tls_psk is not None:
+ request_str['tls_psk'] = tls_psk
+ if tls_issuer is not None:
+ request_str['tls_issuer'] = tls_issuer
+ if tls_subject is not None:
+ request_str['tls_subject'] = tls_subject
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.host.update(request_str)
+ except Exception as e:
+ self._module.fail_json(msg="Failed to link template to host: %s" % e)
+
+    def inventory_mode_numeric(self, inventory_mode):
+        if inventory_mode == "automatic":
+            return 1
+        elif inventory_mode == "manual":
+            return 0
+        elif inventory_mode == "disabled":
+            return -1
+        return inventory_mode
+
+ # Update the host inventory_mode
+ def update_inventory_mode(self, host_id, inventory_mode):
+
+ # nothing was set, do nothing
+ if not inventory_mode:
+ return
+
+ inventory_mode = self.inventory_mode_numeric(inventory_mode)
+
+ # watch for - https://support.zabbix.com/browse/ZBX-6033
+ request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.host.update(request_str)
+ except Exception as e:
+ self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
+
+ def update_inventory_zabbix(self, host_id, inventory):
+
+ if not inventory:
+ return
+
+ request_str = {'hostid': host_id, 'inventory': inventory}
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.host.update(request_str)
+ except Exception as e:
+ self._module.fail_json(msg="Failed to set inventory to host: %s" % e)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ host_name=dict(type='str', required=True),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False, default=None, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ host_groups=dict(type='list', required=False),
+ link_templates=dict(type='list', required=False),
+ status=dict(type='str', default="enabled", choices=['enabled', 'disabled']),
+ state=dict(type='str', default="present", choices=['present', 'absent']),
+ inventory_mode=dict(type='str', required=False, choices=['automatic', 'manual', 'disabled']),
+ ipmi_authtype=dict(type='int', default=None),
+ ipmi_privilege=dict(type='int', default=None),
+ ipmi_username=dict(type='str', required=False, default=None),
+ ipmi_password=dict(type='str', required=False, default=None, no_log=True),
+ tls_connect=dict(type='int', default=1),
+ tls_accept=dict(type='int', default=1),
+ tls_psk_identity=dict(type='str', required=False),
+ tls_psk=dict(type='str', required=False),
+ ca_cert=dict(type='str', required=False, aliases=['tls_issuer']),
+ tls_subject=dict(type='str', required=False),
+ inventory_zabbix=dict(type='dict', required=False),
+ timeout=dict(type='int', default=10),
+ interfaces=dict(type='list', required=False),
+ force=dict(type='bool', default=True),
+ proxy=dict(type='str', required=False),
+ visible_name=dict(type='str', required=False),
+ description=dict(type='str', required=False),
+ macros=dict(
+ type='list',
+ elements='dict',
+ aliases=['user_macros'],
+ options=dict(
+ macro=dict(type='str', required=True),
+ value=dict(type='str', required=True),
+ description=dict(type='str', required=False, default='')
+ )
+ ),
+ tags=dict(
+ type='list',
+ elements='dict',
+ aliases=['host_tags'],
+ options=dict(
+ tag=dict(type='str', required=True),
+ value=dict(type='str', default='')
+ )
+ )
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
+
+ server_url = module.params['server_url']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ validate_certs = module.params['validate_certs']
+ host_name = module.params['host_name']
+ visible_name = module.params['visible_name']
+ description = module.params['description']
+ host_groups = module.params['host_groups']
+ link_templates = module.params['link_templates']
+ inventory_mode = module.params['inventory_mode']
+ ipmi_authtype = module.params['ipmi_authtype']
+ ipmi_privilege = module.params['ipmi_privilege']
+ ipmi_username = module.params['ipmi_username']
+ ipmi_password = module.params['ipmi_password']
+ tls_connect = module.params['tls_connect']
+ tls_accept = module.params['tls_accept']
+ tls_psk_identity = module.params['tls_psk_identity']
+ tls_psk = module.params['tls_psk']
+ tls_issuer = module.params['ca_cert']
+ tls_subject = module.params['tls_subject']
+ inventory_zabbix = module.params['inventory_zabbix']
+ status = module.params['status']
+ state = module.params['state']
+ timeout = module.params['timeout']
+ interfaces = module.params['interfaces']
+ force = module.params['force']
+ proxy = module.params['proxy']
+ macros = module.params['macros']
+ tags = module.params['tags']
+
+ # convert enabled to 0; disabled to 1
+ status = 1 if status == "disabled" else 0
+
+ zbx = None
+ # login to zabbix
+ try:
+ zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
+ validate_certs=validate_certs)
+ zbx.login(login_user, login_password)
+ atexit.register(zbx.logout)
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ host = Host(module, zbx)
+
+ template_ids = []
+ if link_templates:
+ template_ids = host.get_template_ids(link_templates)
+
+ group_ids = []
+
+ if host_groups:
+ group_ids = host.get_group_ids_by_group_names(host_groups)
+
+ ip = ""
+ if interfaces:
+ # ensure interfaces are well-formed
+ for interface in interfaces:
+ if 'type' not in interface:
+ module.fail_json(msg="(interface) type needs to be specified for interface '%s'." % interface)
+ interfacetypes = {'agent': 1, 'snmp': 2, 'ipmi': 3, 'jmx': 4}
+ if interface['type'] in interfacetypes.keys():
+ interface['type'] = interfacetypes[interface['type']]
+ if interface['type'] < 1 or interface['type'] > 4:
+ module.fail_json(msg="Interface type can only be 1-4 for interface '%s'." % interface)
+ if 'useip' not in interface:
+ interface['useip'] = 0
+ if 'dns' not in interface:
+ if interface['useip'] == 0:
+ module.fail_json(msg="dns needs to be set if useip is 0 on interface '%s'." % interface)
+ interface['dns'] = ''
+ if 'ip' not in interface:
+ if interface['useip'] == 1:
+ module.fail_json(msg="ip needs to be set if useip is 1 on interface '%s'." % interface)
+ interface['ip'] = ''
+ if 'main' not in interface:
+ interface['main'] = 0
+ if 'port' in interface and not isinstance(interface['port'], str):
+ try:
+ interface['port'] = str(interface['port'])
+ except ValueError:
+                module.fail_json(msg="port should be convertible to string on interface '%s'." % interface)
+ if 'port' not in interface:
+ if interface['type'] == 1:
+ interface['port'] = "10050"
+ elif interface['type'] == 2:
+ interface['port'] = "161"
+ elif interface['type'] == 3:
+ interface['port'] = "623"
+ elif interface['type'] == 4:
+ interface['port'] = "12345"
+
+ if interface['type'] == 1:
+ ip = interface['ip']
+
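Editor's note: to make the defaulting above concrete, here is a standalone restatement of the per-interface normalization with a sample run (illustrative only; the module does this in place, and the validation failures for missing ip/dns are omitted here):

```python
TYPE_NAMES = {'agent': 1, 'snmp': 2, 'ipmi': 3, 'jmx': 4}
DEFAULT_PORTS = {1: '10050', 2: '161', 3: '623', 4: '12345'}

def normalize_interface(interface):
    iface = dict(interface)
    # map symbolic types to their numeric Zabbix codes
    iface['type'] = TYPE_NAMES.get(iface['type'], iface['type'])
    iface.setdefault('useip', 0)
    iface.setdefault('dns', '')
    iface.setdefault('ip', '')
    iface.setdefault('main', 0)
    # ports are kept as strings, with a per-type default
    iface.setdefault('port', DEFAULT_PORTS[iface['type']])
    iface['port'] = str(iface['port'])
    return iface

print(normalize_interface({'type': 'agent', 'useip': 1, 'ip': '10.0.0.5'}))
# {'type': 1, 'useip': 1, 'ip': '10.0.0.5', 'dns': '', 'main': 0, 'port': '10050'}
```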
+ if macros:
+ # convert macros to zabbix native format - {$MACRO}
+ for macro in macros:
+ macro['macro'] = macro['macro'].upper()
+ if not macro['macro'].startswith('{$'):
+ macro['macro'] = '{$' + macro['macro']
+ if not macro['macro'].endswith('}'):
+ macro['macro'] = macro['macro'] + '}'
+ if LooseVersion(zbx.api_version()[:5]) <= LooseVersion('4.4.0'):
+ if 'description' in macro:
+ macro.pop('description', False)
+
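Editor's note: the loop above turns loosely written macro names into Zabbix-native {$MACRO} form. A minimal standalone version of the same normalization, for illustration:

```python
def normalize_macro_name(name):
    # upper-case, then wrap in {$...} if either delimiter is missing
    name = name.upper()
    if not name.startswith('{$'):
        name = '{$' + name
    if not name.endswith('}'):
        name = name + '}'
    return name

for raw in ('db_user', '{$snmp_comm', '{$APP_PORT}'):
    print(raw, '->', normalize_macro_name(raw))
# db_user -> {$DB_USER}
# {$snmp_comm -> {$SNMP_COMM}
# {$APP_PORT} -> {$APP_PORT}
```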
+    # Use the proxy specified in the task, or 0 (no proxy) otherwise
+ if proxy:
+ proxy_id = host.get_proxyid_by_proxy_name(proxy)
+ else:
+ proxy_id = 0
+
+    # check if the host exists
+ is_host_exist = host.is_host_exist(host_name)
+
+ if is_host_exist:
+ # get host id by host name
+ zabbix_host_obj = host.get_host_by_host_name(host_name)
+ host_id = zabbix_host_obj['hostid']
+
+ # If proxy is not specified as a module parameter, use the existing setting
+ if proxy is None:
+ proxy_id = int(zabbix_host_obj['proxy_hostid'])
+
+ if state == "absent":
+ # remove host
+ host.delete_host(host_id, host_name)
+            module.exit_json(changed=True, result="Successfully deleted host %s" % host_name)
+ else:
+ if not host_groups:
+ # if host_groups have not been specified when updating an existing host, just
+ # get the group_ids from the existing host without updating them.
+ group_ids = host.get_group_ids_by_host_id(host_id)
+
+ # get existing host's interfaces
+ exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
+
+ # if no interfaces were specified with the module, start with an empty list
+ if not interfaces:
+ interfaces = []
+
+            # When force=no is specified, merge the existing interfaces into the interfaces to
+            # update. When no interfaces were specified at all, take over the existing interfaces
+            # as reported by the API. Do the same for templates and host groups.
+ if not force or not interfaces:
+ for interface in copy.deepcopy(exist_interfaces):
+ # remove values not used during hostinterface.add/update calls
+ for key in tuple(interface.keys()):
+ if key in ['interfaceid', 'hostid', 'bulk']:
+ interface.pop(key, None)
+
+ for index in interface.keys():
+ if index in ['useip', 'main', 'type']:
+ interface[index] = int(interface[index])
+
+ if interface not in interfaces:
+ interfaces.append(interface)
+
+ if not force or link_templates is None:
+ template_ids = list(set(template_ids + host.get_host_templates_by_host_id(host_id)))
+
+ if not force:
+ for group_id in host.get_group_ids_by_host_id(host_id):
+ if group_id not in group_ids:
+ group_ids.append(group_id)
+
+            # When force=no, macros missing from the host.update call would be removed, so copy the existing ones over
+ if macros is not None and 'macros' in zabbix_host_obj.keys():
+ provided_macros = [m['macro'] for m in macros]
+ existing_macros = zabbix_host_obj['macros']
+ for macro in existing_macros:
+ if macro['macro'] not in provided_macros:
+ macros.append(macro)
+
+            # When force=no, tags missing from the host.update call would be removed, so copy the existing ones over
+ if tags is not None and 'tags' in zabbix_host_obj.keys():
+ provided_tags = [t['tag'] for t in tags]
+ existing_tags = zabbix_host_obj['tags']
+ for tag in existing_tags:
+ if tag['tag'] not in provided_tags:
+ tags.append(tag)
+
+ # update host
+ if host.check_all_properties(
+ host_id, group_ids, status, interfaces, template_ids, exist_interfaces, zabbix_host_obj, proxy_id,
+ visible_name, description, host_name, inventory_mode, inventory_zabbix, tls_accept,
+ tls_psk_identity, tls_psk, tls_issuer, tls_subject, tls_connect, ipmi_authtype, ipmi_privilege,
+ ipmi_username, ipmi_password, macros, tags):
+
+ host.update_host(
+ host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id, visible_name,
+ description, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject,
+ ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password, macros, tags)
+
+ host.link_or_clear_template(
+ host_id, template_ids, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer,
+ tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password)
+
+ host.update_inventory_mode(host_id, inventory_mode)
+ host.update_inventory_zabbix(host_id, inventory_zabbix)
+
+                module.exit_json(changed=True,
+                                 result="Successfully updated host %s (%s) and linked with templates '%s'"
+                                        % (host_name, ip, link_templates))
+ else:
+ module.exit_json(changed=False)
+
+ else:
+ if state == "absent":
+ # the host is already deleted.
+ module.exit_json(changed=False)
+
+ if not group_ids:
+ module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
+
+        if not interfaces:
+ module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
+
+ # create host
+ host_id = host.add_host(
+ host_name, group_ids, status, interfaces, proxy_id, visible_name, description, tls_connect, tls_accept,
+ tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username,
+ ipmi_password, macros, tags)
+
+ host.link_or_clear_template(
+ host_id, template_ids, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject,
+ ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password)
+
+ host.update_inventory_mode(host_id, inventory_mode)
+ host.update_inventory_zabbix(host_id, inventory_zabbix)
+
+        module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with templates '%s'" % (
+            host_name, ip, link_templates))
+
+
+if __name__ == '__main__':
+ main()
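Editor's note: a pattern worth calling out before the next module — every mutating call in these Zabbix modules is guarded so that check mode reports changed=True without touching the API, and real failures are wrapped into a readable fail_json message. A minimal standalone restatement of that guard (a sketch; `do_change` and `fail_msg` are hypothetical names):

```python
def apply_change(module, do_change, fail_msg):
    """Run a mutating API call unless check mode is active.

    Mirrors the guard used throughout the modules above: in check mode the
    module exits early reporting a change; otherwise failures are wrapped
    into fail_json with a readable message.
    """
    if module.check_mode:
        module.exit_json(changed=True)
    try:
        do_change()
    except Exception as e:  # the modules above deliberately catch broadly
        module.fail_json(msg="%s: %s" % (fail_msg, e))
```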
diff --git a/test/support/integration/plugins/modules/zabbix_proxy.py b/test/support/integration/plugins/modules/zabbix_proxy.py
new file mode 100644
index 0000000000..9643129304
--- /dev/null
+++ b/test/support/integration/plugins/modules/zabbix_proxy.py
@@ -0,0 +1,472 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Alen Komic
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: zabbix_proxy
+short_description: Create/delete/get/update Zabbix proxies
+description:
+ - This module allows you to create, modify, get and delete Zabbix proxy entries.
+version_added: "2.5"
+author:
+ - "Alen Komic (@akomic)"
+requirements:
+ - "python >= 2.6"
+ - "zabbix-api >= 0.5.4"
+options:
+ proxy_name:
+ description:
+ - Name of the proxy in Zabbix.
+ required: true
+ type: str
+ proxy_address:
+ description:
+ - Comma-delimited list of IP/CIDR addresses or DNS names to accept active proxy requests from.
+ - Requires I(status=active).
+      - Works only with Zabbix >= 4.0; remove this option for older versions.
+ required: false
+ version_added: '2.10'
+ type: str
+ description:
+ description:
+ - Description of the proxy.
+ required: false
+ type: str
+ status:
+ description:
+      - Type of proxy. (5 - active, 6 - passive)
+ required: false
+ choices: ['active', 'passive']
+ default: "active"
+ type: str
+ tls_connect:
+ description:
+ - Connections to proxy.
+ required: false
+ choices: ['no_encryption','PSK','certificate']
+ default: 'no_encryption'
+ type: str
+ tls_accept:
+ description:
+ - Connections from proxy.
+ required: false
+ choices: ['no_encryption','PSK','certificate']
+ default: 'no_encryption'
+ type: str
+ ca_cert:
+ description:
+ - Certificate issuer.
+ required: false
+ aliases: [ tls_issuer ]
+ type: str
+ tls_subject:
+ description:
+ - Certificate subject.
+ required: false
+ type: str
+ tls_psk_identity:
+ description:
+ - PSK identity. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
+ required: false
+ type: str
+ tls_psk:
+ description:
+ - The preshared key, at least 32 hex digits. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
+ required: false
+ type: str
+ state:
+ description:
+ - State of the proxy.
+      - On C(present), it will create a proxy if it does not exist, or update the proxy if the associated data differs.
+      - On C(absent), it will remove the proxy if it exists.
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+ type: str
+ interface:
+ description:
+ - Dictionary with params for the interface when proxy is in passive mode.
+ - For more information, review proxy interface documentation at
+ - U(https://www.zabbix.com/documentation/4.0/manual/api/reference/proxy/object#proxy_interface).
+ required: false
+ suboptions:
+ useip:
+ type: int
+ description:
+ - Connect to proxy interface with IP address instead of DNS name.
+ - 0 (don't use ip), 1 (use ip).
+ default: 0
+ choices: [0, 1]
+ ip:
+ type: str
+ description:
+ - IP address used by proxy interface.
+ - Required if I(useip=1).
+ default: ''
+ dns:
+ type: str
+ description:
+ - DNS name of the proxy interface.
+ - Required if I(useip=0).
+ default: ''
+ port:
+ type: str
+ description:
+ - Port used by proxy interface.
+ default: '10051'
+ type:
+ type: int
+ description:
+ - Interface type to add.
+ - This suboption is currently ignored for Zabbix proxy.
+ - This suboption is deprecated since Ansible 2.10 and will eventually be removed in 2.14.
+ required: false
+ default: 0
+ main:
+ type: int
+ description:
+ - Whether the interface is used as default.
+ - This suboption is currently ignored for Zabbix proxy.
+ - This suboption is deprecated since Ansible 2.10 and will eventually be removed in 2.14.
+ required: false
+ default: 0
+ default: {}
+ type: dict
+
+extends_documentation_fragment:
+ - zabbix
+'''
+
+EXAMPLES = r'''
+- name: Create or update a proxy with proxy type active
+ local_action:
+ module: zabbix_proxy
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ proxy_name: ExampleProxy
+ description: ExampleProxy
+ status: active
+ state: present
+ proxy_address: ExampleProxy.local
+
+- name: Create a new passive proxy using only its IP
+ local_action:
+ module: zabbix_proxy
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ proxy_name: ExampleProxy
+ description: ExampleProxy
+ status: passive
+ state: present
+ interface:
+ useip: 1
+ ip: 10.1.1.2
+ port: 10051
+
+- name: Create a new passive proxy using only its DNS
+ local_action:
+ module: zabbix_proxy
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ proxy_name: ExampleProxy
+ description: ExampleProxy
+ status: passive
+ state: present
+ interface:
+ dns: proxy.example.com
+ port: 10051
+'''
+
+RETURN = r''' # '''
+
+
+import traceback
+import atexit
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+try:
+ from zabbix_api import ZabbixAPI
+
+ HAS_ZABBIX_API = True
+except ImportError:
+ ZBX_IMP_ERR = traceback.format_exc()
+ HAS_ZABBIX_API = False
+
+
+class Proxy(object):
+ def __init__(self, module, zbx):
+ self._module = module
+ self._zapi = zbx
+ self.existing_data = None
+
+ def proxy_exists(self, proxy_name):
+ result = self._zapi.proxy.get({
+ 'output': 'extend', 'selectInterface': 'extend',
+ 'filter': {'host': proxy_name}})
+
+ if len(result) > 0 and 'proxyid' in result[0]:
+ self.existing_data = result[0]
+ return result[0]['proxyid']
+ else:
+ return result
+
+ def add_proxy(self, data):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+
+ parameters = {}
+ for item in data:
+ if data[item]:
+ parameters[item] = data[item]
+
+ if 'proxy_address' in data and data['status'] != '5':
+ parameters.pop('proxy_address', False)
+
+ if 'interface' in data and data['status'] != '6':
+ parameters.pop('interface', False)
+
+            self._zapi.proxy.create(parameters)
+            # exit_json() terminates the module run, so add_proxy never returns
+            self._module.exit_json(changed=True,
+                                   result="Successfully added proxy %s (%s)" %
+                                          (data['host'], data['status']))
+ except Exception as e:
+ self._module.fail_json(msg="Failed to create proxy %s: %s" %
+ (data['host'], e))
+
+ def delete_proxy(self, proxy_id, proxy_name):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ self._zapi.proxy.delete([proxy_id])
+ self._module.exit_json(changed=True,
+ result="Successfully deleted"
+ + " proxy %s" % proxy_name)
+ except Exception as e:
+ self._module.fail_json(msg="Failed to delete proxy %s: %s" %
+ (proxy_name, str(e)))
+
+ def compile_interface_params(self, new_interface):
+ old_interface = {}
+ if 'interface' in self.existing_data and \
+ len(self.existing_data['interface']) > 0:
+ old_interface = self.existing_data['interface']
+
+ for item in ['type', 'main']:
+ new_interface.pop(item, False)
+
+ final_interface = old_interface.copy()
+ final_interface.update(new_interface)
+ final_interface = dict((k, str(v)) for k, v in final_interface.items())
+
+ if final_interface != old_interface:
+ return final_interface
+ else:
+ return {}
+
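Editor's note: the merge above can be read as "overlay the requested keys on the stored interface, stringify everything, and report a result only if something changed". A worked example with hypothetical values:

```python
old = {'useip': '1', 'ip': '10.1.1.2', 'dns': '', 'port': '10051'}
new = {'port': 10052}  # 'type' and 'main' have already been stripped

final = dict(old)
final.update(new)
final = {k: str(v) for k, v in final.items()}

print(final != old)  # True: the port changed, so the update would be sent
print(final)         # {'useip': '1', 'ip': '10.1.1.2', 'dns': '', 'port': '10052'}
```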
+ def update_proxy(self, proxy_id, data):
+ try:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True)
+ parameters = {'proxyid': proxy_id}
+
+ for item in data:
+ if data[item] and item in self.existing_data and \
+ self.existing_data[item] != data[item]:
+ parameters[item] = data[item]
+
+ if 'interface' in parameters:
+ parameters.pop('interface')
+
+ if 'proxy_address' in data and data['status'] != '5':
+ parameters.pop('proxy_address', False)
+
+ if 'interface' in data and data['status'] != '6':
+ parameters.pop('interface', False)
+
+ if 'interface' in data and data['status'] == '6':
+ new_interface = self.compile_interface_params(data['interface'])
+ if len(new_interface) > 0:
+ parameters['interface'] = new_interface
+
+ if len(parameters) > 1:
+ self._zapi.proxy.update(parameters)
+ self._module.exit_json(
+ changed=True,
+ result="Successfully updated proxy %s (%s)" %
+ (data['host'], proxy_id)
+ )
+ else:
+ self._module.exit_json(changed=False)
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update proxy %s: %s" %
+ (data['host'], e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True, aliases=['url']),
+ login_user=dict(type='str', required=True),
+ login_password=dict(type='str', required=True, no_log=True),
+ proxy_name=dict(type='str', required=True),
+ proxy_address=dict(type='str', required=False),
+ http_login_user=dict(type='str', required=False, default=None),
+ http_login_password=dict(type='str', required=False,
+ default=None, no_log=True),
+ validate_certs=dict(type='bool', required=False, default=True),
+ status=dict(type='str', default="active", choices=['active', 'passive']),
+ state=dict(type='str', default="present", choices=['present', 'absent']),
+ description=dict(type='str', required=False),
+ tls_connect=dict(type='str', default='no_encryption',
+ choices=['no_encryption', 'PSK', 'certificate']),
+ tls_accept=dict(type='str', default='no_encryption',
+ choices=['no_encryption', 'PSK', 'certificate']),
+ ca_cert=dict(type='str', required=False, default=None, aliases=['tls_issuer']),
+ tls_subject=dict(type='str', required=False, default=None),
+ tls_psk_identity=dict(type='str', required=False, default=None),
+ tls_psk=dict(type='str', required=False, default=None),
+ timeout=dict(type='int', default=10),
+ interface=dict(
+ type='dict',
+ required=False,
+ default={},
+ options=dict(
+ useip=dict(type='int', choices=[0, 1], default=0),
+ ip=dict(type='str', default=''),
+ dns=dict(type='str', default=''),
+ port=dict(type='str', default='10051'),
+ type=dict(type='int', default=0, removed_in_version='2.14'),
+ main=dict(type='int', default=0, removed_in_version='2.14')
+ ),
+ )
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ZABBIX_API:
+ module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
+
+ server_url = module.params['server_url']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ http_login_user = module.params['http_login_user']
+ http_login_password = module.params['http_login_password']
+ validate_certs = module.params['validate_certs']
+ proxy_name = module.params['proxy_name']
+ proxy_address = module.params['proxy_address']
+ description = module.params['description']
+ status = module.params['status']
+ tls_connect = module.params['tls_connect']
+ tls_accept = module.params['tls_accept']
+ tls_issuer = module.params['ca_cert']
+ tls_subject = module.params['tls_subject']
+ tls_psk_identity = module.params['tls_psk_identity']
+ tls_psk = module.params['tls_psk']
+ state = module.params['state']
+ timeout = module.params['timeout']
+ interface = module.params['interface']
+
+    # convert status: active -> 5, passive -> 6 (Zabbix proxy status codes)
+ status = 6 if status == "passive" else 5
+
+ if tls_connect == 'certificate':
+ tls_connect = 4
+ elif tls_connect == 'PSK':
+ tls_connect = 2
+ else:
+ tls_connect = 1
+
+ if tls_accept == 'certificate':
+ tls_accept = 4
+ elif tls_accept == 'PSK':
+ tls_accept = 2
+ else:
+ tls_accept = 1
+
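Editor's note: the if/elif ladders above encode fixed lookups. An equivalent dict-based sketch using the same Zabbix API codes, for readers who prefer the mapping spelled out in one place (illustrative only):

```python
PROXY_STATUS = {'active': 5, 'passive': 6}
TLS_MODES = {'no_encryption': 1, 'PSK': 2, 'certificate': 4}

def to_numeric(status, tls_connect, tls_accept):
    return PROXY_STATUS[status], TLS_MODES[tls_connect], TLS_MODES[tls_accept]

print(to_numeric('passive', 'PSK', 'no_encryption'))  # (6, 2, 1)
```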
+ zbx = None
+ # login to zabbix
+ try:
+ zbx = ZabbixAPI(server_url, timeout=timeout,
+ user=http_login_user,
+ passwd=http_login_password,
+ validate_certs=validate_certs)
+ zbx.login(login_user, login_password)
+ atexit.register(zbx.logout)
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
+
+ proxy = Proxy(module, zbx)
+
+ # check if proxy already exists
+ proxy_id = proxy.proxy_exists(proxy_name)
+
+ if proxy_id:
+ if state == "absent":
+ # remove proxy
+ proxy.delete_proxy(proxy_id, proxy_name)
+ else:
+ proxy.update_proxy(proxy_id, {
+ 'host': proxy_name,
+ 'description': description,
+ 'status': str(status),
+ 'tls_connect': str(tls_connect),
+ 'tls_accept': str(tls_accept),
+ 'tls_issuer': tls_issuer,
+ 'tls_subject': tls_subject,
+ 'tls_psk_identity': tls_psk_identity,
+ 'tls_psk': tls_psk,
+ 'interface': interface,
+ 'proxy_address': proxy_address
+ })
+ else:
+ if state == "absent":
+ # the proxy is already deleted.
+ module.exit_json(changed=False)
+
+ proxy_id = proxy.add_proxy(data={
+ 'host': proxy_name,
+ 'description': description,
+ 'status': str(status),
+ 'tls_connect': str(tls_connect),
+ 'tls_accept': str(tls_accept),
+ 'tls_issuer': tls_issuer,
+ 'tls_subject': tls_subject,
+ 'tls_psk_identity': tls_psk_identity,
+ 'tls_psk': tls_psk,
+ 'interface': interface,
+ 'proxy_address': proxy_address
+ })
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/zypper.py b/test/support/integration/plugins/modules/zypper.py
new file mode 100644
index 0000000000..bfb318190d
--- /dev/null
+++ b/test/support/integration/plugins/modules/zypper.py
@@ -0,0 +1,540 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# based on
+# openbsd_pkg
+# (c) 2013
+# Patrik Lundin <patrik.lundin.swe@gmail.com>
+#
+# yum
+# (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+ - "Patrick Callahan (@dirtyharrycallahan)"
+ - "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
+version_added: "1.2"
+short_description: Manage packages on SUSE and openSUSE
+description:
+ - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+options:
+ name:
+ description:
+ - Package name C(name) or package specifier or a list of either.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ update the package within the version range given.
+      - You can also pass a URL or a local path to an RPM file.
+ - When using state=latest, this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using C(dist-upgrade), I(name) should be C('*').
+ required: false
+ choices: [ present, latest, absent, dist-upgrade ]
+ default: "present"
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ version_added: "2.0"
+ extra_args_precommand:
+ version_added: "2.6"
+ required: false
+ description:
+ - Add additional global target options to C(zypper).
+ - Options should be supplied in a single line as if given in the command line.
+ disable_gpg_check:
+ description:
+      - Whether to disable the GPG signature checking of the package
+        being installed. Has an effect only if state is
+        I(present) or I(latest).
+ required: false
+ default: "no"
+ type: bool
+ disable_recommends:
+ version_added: "1.8"
+ description:
+      - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) prevents installation of recommended packages; C(no)
+        installs recommended packages (zypper's own default behavior).
+ required: false
+ default: "yes"
+ type: bool
+ force:
+ version_added: "2.2"
+ description:
+      - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+ required: false
+ default: "no"
+ type: bool
+ force_resolution:
+ version_added: "2.10"
+ description:
+      - Adds C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver will choose a solution).
+ required: false
+ default: "no"
+ type: bool
+ update_cache:
+ version_added: "2.2"
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+ required: false
+ default: "no"
+ type: bool
+ aliases: [ "refresh" ]
+ oldpackage:
+ version_added: "2.2"
+ description:
+      - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+        version is specified as part of the package name.
+ required: false
+ default: "no"
+ type: bool
+ extra_args:
+ version_added: "2.4"
+ required: false
+ description:
+ - Add additional options to C(zypper) command.
+ - Options should be supplied in a single line as if given in the command line.
+notes:
+  - When used with a `loop:`, each package will be processed individually;
+    it is much more efficient to pass the list directly to the `name` option.
+# informational: requirements for nodes
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
+'''
+
+EXAMPLES = '''
+# Install "nmap"
+- zypper:
+ name: nmap
+ state: present
+
+# Install apache2 with recommended packages
+- zypper:
+ name: apache2
+ state: present
+ disable_recommends: no
+
+# Apply a given patch
+- zypper:
+ name: openSUSE-2016-128
+ state: present
+ type: patch
+
+# Remove the "nmap" package
+- zypper:
+ name: nmap
+ state: absent
+
+# Install the nginx rpm from a remote repo
+- zypper:
+ name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+ state: present
+
+# Install local rpm file
+- zypper:
+ name: /tmp/fancy-software.rpm
+ state: present
+
+# Update all packages
+- zypper:
+ name: '*'
+ state: latest
+
+# Apply all available patches
+- zypper:
+ name: '*'
+ state: latest
+ type: patch
+
+# Perform a dist-upgrade with additional arguments
+- zypper:
+ name: '*'
+ state: dist-upgrade
+ extra_args: '--no-allow-vendor-change --allow-arch-change'
+
+# Refresh repositories and update package "openssl"
+- zypper:
+ name: openssl
+ state: present
+ update_cache: yes
+
+# Install specific version (possible comparisons: <, >, <=, >=, =)
+- zypper:
+ name: 'docker>=1.10'
+ state: present
+
+# Wait 20 seconds to acquire the lock before failing
+- zypper:
+ name: mosh
+ state: present
+ environment:
+ ZYPP_LOCK_TIMEOUT: 20
+'''
+
+import xml
+import re
+from xml.dom.minidom import parseString as parseXML
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Package:
+ def __init__(self, name, prefix, version):
+ self.name = name
+ self.prefix = prefix
+ self.version = version
+ self.shouldinstall = (prefix == '+')
+
+ def __str__(self):
+ return self.prefix + self.name + self.version
+
+
+def split_name_version(name):
+    """splits off the package name and the desired version
+
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating remove "-", "~" or install "+"
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
+ if prefix == '~':
+ prefix = '-'
+
+ version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ if version is None:
+ version = ''
+ return prefix, name, version
+ except Exception:
+ return prefix, name, ''
+
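Editor's note: a quick demonstration of the splitting regex above, runnable as-is:

```python
import re

# Same pattern as in split_name_version() above.
version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
for spec in ('docker>=1.10', 'apache=2.4', 'nmap'):
    name, version = version_check.match(spec).groups()
    print((name, version))
# ('docker', '>=1.10')
# ('apache', '=2.4')
# ('nmap', None)
```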
+
+def get_want_state(names, remove=False):
+ packages = []
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix not in ['-', '+']:
+ if remove:
+ prefix = '-'
+ else:
+ prefix = '+'
+ packages.append(Package(pname, prefix, version))
+ return packages, urls
+
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
+ cmd.extend([p.name for p in packages])
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
+
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
+ rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+
+ try:
+ dom = parseXML(stdout)
+ except xml.parsers.expat.ExpatError as exc:
+ m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
+ rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+        firstrun = packages is None  # False on the self-update retry below
+        if firstrun:
+            packages = {}
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
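Editor's note: the rc == 103 branch above means "zypper replaced itself mid-transaction, so reissue the identical command, carrying forward what was already parsed" — the module retries exactly once, guarded by firstrun. A stripped-down sketch of that control flow (hypothetical runner; the real code threads `packages` through the recursion instead of looping):

```python
def run_until_stable(run_once, max_self_updates=1):
    """run_once() returns (rc, data); rc 103 means the tool upgraded
    itself and the same command must be reissued."""
    rc, data = run_once()
    for _ in range(max_self_updates):
        if rc != 103:
            break
        rc, data = run_once()
    return rc, data
```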
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+ if m.params['extra_args_precommand']:
+ args_list = m.params['extra_args_precommand'].split()
+ cmd.extend(args_list)
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ if subcommand == 'search':
+ cmd.append('--disable-repositories')
+
+ cmd.append(subcommand)
+ if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['force_resolution']:
+ cmd.append('--force-resolution')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ if m.params['extra_args']:
+ args_list = m.params['extra_args'].split(' ')
+ cmd.extend(args_list)
+
+ return cmd
+
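Editor's note: to see what get_cmd() assembles, one can feed it a minimal stand-in for the module object. With the hypothetical parameters below, an install in check mode produces the command line shown in the comment (assumes get_cmd from above is in scope):

```python
class FakeModule:
    check_mode = True
    params = {
        'extra_args_precommand': None, 'disable_gpg_check': True,
        'type': 'package', 'disable_recommends': True, 'force': False,
        'force_resolution': False, 'oldpackage': False, 'extra_args': None,
    }

print(' '.join(get_cmd(FakeModule(), 'install')))
# -> /usr/bin/zypper --quiet --non-interactive --xmlout --no-gpg-checks install \
#    --type package --dry-run --auto-agree-with-licenses --no-recommends
```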
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, want_latest):
+    "install and (if want_latest) update the packages in name; entries prefixed with '-' are removed"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ packages, urls = get_want_state(name)
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if any(p.version for p in packages):
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ # if a version is given leave the package in to let zypper handle the version
+ # resolution
+ packageswithoutversion = [p for p in packages if not p.version]
+ prerun_state = get_installed_state(m, packageswithoutversion)
+ # generate lists of packages to install or remove
+ packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)]
+
+ if not packages and not urls:
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+ # pass packages to zypper
+ # allow for + or - prefixes in install/remove lists
+ # also add version specifier if given
+ # do this in one zypper run to allow for dependency-resolution
+    # for example "-exim postfix" removes exim and installs postfix in one transaction, so nothing that depends on a mailserver breaks
+ cmd.extend([str(p) for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ elif m.params['state'] == 'dist-upgrade':
+ cmdname = 'dist-upgrade'
+ else:
+ cmdname = 'update'
+
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ packages, urls = get_want_state(name, remove=True)
+ if any(p.prefix == '+' for p in packages):
+ m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Can not remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Can not remove patches.")
+ prerun_state = get_installed_state(m, packages)
+ packages = [p for p in packages if p.name in prerun_state]
+
+ if not packages:
+ return None, retvals
+
+ cmd = get_cmd(m, 'remove')
+ cmd.extend([p.name + p.version for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+
+ cmd = get_cmd(m, 'refresh')
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return retvals
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='list'),
+ state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ extra_args_precommand=dict(required=False, default=None),
+ disable_gpg_check=dict(required=False, default='no', type='bool'),
+ disable_recommends=dict(required=False, default='yes', type='bool'),
+ force=dict(required=False, default='no', type='bool'),
+ force_resolution=dict(required=False, default='no', type='bool'),
+ update_cache=dict(required=False, aliases=['refresh'], default='no', type='bool'),
+ oldpackage=dict(required=False, default='no', type='bool'),
+ extra_args=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+
+ # remove empty strings from package list
+ name = list(filter(None, name))
+
+ # Refresh repositories
+ if update_cache and not module.check_mode:
+ retvals = repo_refresh(module)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+ # Perform requested action
+ if name == ['*'] and state in ['latest', 'dist-upgrade']:
+ packages_changed, retvals = package_update_all(module)
+ elif name != ['*'] and state == 'dist-upgrade':
+ module.fail_json(msg="Can not dist-upgrade specific packages.")
+ else:
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
+
+ retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+ if module._diff:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper run failed.", **retvals)
+
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
+
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/utils/shippable/incidental/aix.sh b/test/utils/shippable/incidental/aix.sh
new file mode 120000
index 0000000000..6ddb776854
--- /dev/null
+++ b/test/utils/shippable/incidental/aix.sh
@@ -0,0 +1 @@
+remote.sh \ No newline at end of file
diff --git a/test/utils/shippable/incidental/freebsd.sh b/test/utils/shippable/incidental/freebsd.sh
new file mode 120000
index 0000000000..6ddb776854
--- /dev/null
+++ b/test/utils/shippable/incidental/freebsd.sh
@@ -0,0 +1 @@
+remote.sh \ No newline at end of file
diff --git a/test/utils/shippable/incidental/linux.sh b/test/utils/shippable/incidental/linux.sh
new file mode 100755
index 0000000000..201d23bc48
--- /dev/null
+++ b/test/utils/shippable/incidental/linux.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+image="${args[1]}"
+
+target="shippable/posix/incidental/"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --docker "${image}" \
+ --enable-test-support \
diff --git a/test/utils/shippable/incidental/osx.sh b/test/utils/shippable/incidental/osx.sh
new file mode 120000
index 0000000000..6ddb776854
--- /dev/null
+++ b/test/utils/shippable/incidental/osx.sh
@@ -0,0 +1 @@
+remote.sh \ No newline at end of file
diff --git a/test/utils/shippable/incidental/remote.sh b/test/utils/shippable/incidental/remote.sh
new file mode 100755
index 0000000000..c3f1331582
--- /dev/null
+++ b/test/utils/shippable/incidental/remote.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+target="shippable/posix/incidental/"
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" \
+ --enable-test-support \
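Editor's note: both wrapper scripts take a single spec such as rhel/8.1 (or an image spec like linux/centos7) and split it on '/' and ':' via IFS before dispatching to ansible-test. The same split, restated in Python for clarity (hypothetical spec values):

```python
import re

for spec in ('rhel/8.1', 'linux/centos7'):
    # mirrors IFS='/:' read -ra args <<< "$spec" in the scripts above
    args = re.split('[/:]', spec)
    print(args)
# ['rhel', '8.1']
# ['linux', 'centos7']
```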
diff --git a/test/utils/shippable/incidental/rhel.sh b/test/utils/shippable/incidental/rhel.sh
new file mode 120000
index 0000000000..6ddb776854
--- /dev/null
+++ b/test/utils/shippable/incidental/rhel.sh
@@ -0,0 +1 @@
+remote.sh \ No newline at end of file