author     James Falcon <james.falcon@canonical.com>  2021-11-02 18:07:49 -0500
committer  git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>  2021-11-03 02:16:09 +0000
commit     5552b6be6680af032bcf1fc02d4af96736c741b9 (patch)
tree       5da200c97ce6b94851908f489a746fe6632249f0
parent     f02f0c8050f1f98a21b402b72ecb72a52b654e3e (diff)
download   cloud-init-git-5552b6be6680af032bcf1fc02d4af96736c741b9.tar.gz
21.4-0ubuntu1~22.04.1 (patches unapplied)
Imported using git-ubuntu import.
-rw-r--r--  .gitignore | 1
-rw-r--r--  .pylintrc | 3
-rw-r--r--  .travis.yml | 2
-rw-r--r--  ChangeLog | 98
-rw-r--r--  HACKING.rst | 8
-rw-r--r--  Makefile | 11
-rw-r--r--  README.md | 2
-rw-r--r--  cloud-tests-requirements.txt | 28
-rw-r--r--  cloudinit/cmd/devel/hotplug_hook.py | 8
-rw-r--r--  cloudinit/cmd/main.py | 11
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 23
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 148
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py | 2
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 11
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 2
-rw-r--r--  cloudinit/config/cc_fan.py | 42
-rw-r--r--  cloudinit/config/cc_final_message.py | 2
-rw-r--r--  cloudinit/config/cc_growpart.py | 40
-rw-r--r--  cloudinit/config/cc_install_hotplug.py | 140
-rw-r--r--  cloudinit/config/cc_migrator.py | 2
-rw-r--r--  cloudinit/config/cc_ntp.py | 26
-rw-r--r--  cloudinit/config/cc_puppet.py | 23
-rw-r--r--  cloudinit/config/cc_refresh_rmc_and_interface.py | 2
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 17
-rw-r--r--  cloudinit/config/cc_runcmd.py | 7
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py | 2
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 2
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 19
-rwxr-xr-x  cloudinit/config/cc_ssh.py | 18
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py | 34
-rw-r--r--  cloudinit/config/cc_update_hostname.py | 2
-rw-r--r--  cloudinit/config/cc_write_files.py | 41
-rw-r--r--  cloudinit/config/cc_write_files_deferred.py | 55
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 10
-rw-r--r--  cloudinit/config/tests/test_resolv_conf.py | 4
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py | 41
-rwxr-xr-x  cloudinit/distros/__init__.py | 39
-rw-r--r--  cloudinit/distros/cloudlinux.py (renamed from tests/cloud_tests/testcases/bugs/__init__.py) | 7
-rw-r--r--  cloudinit/distros/openEuler.py (renamed from tests/cloud_tests/testcases/examples/__init__.py) | 7
-rwxr-xr-x  cloudinit/distros/ug_util.py | 234
-rw-r--r--  cloudinit/dmi.py | 4
-rw-r--r--  cloudinit/gpg.py | 30
-rw-r--r--  cloudinit/net/__init__.py | 37
-rw-r--r--  cloudinit/net/dhcp.py | 20
-rw-r--r--  cloudinit/net/network_state.py | 4
-rw-r--r--  cloudinit/net/networkd.py | 6
-rw-r--r--  cloudinit/net/renderer.py | 4
-rw-r--r--  cloudinit/net/sysconfig.py | 4
-rw-r--r--  cloudinit/net/tests/test_dhcp.py | 8
-rw-r--r--  cloudinit/net/tests/test_init.py | 20
-rw-r--r--  cloudinit/net/tests/test_network_state.py | 1
-rwxr-xr-x  cloudinit/reporting/handlers.py | 10
-rw-r--r--  cloudinit/safeyaml.py | 2
-rw-r--r--  cloudinit/settings.py | 1
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 90
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 2
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 21
-rw-r--r--  cloudinit/sources/DataSourceLXD.py | 358
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 2
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 7
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 8
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 13
-rw-r--r--  cloudinit/sources/DataSourceVultr.py | 54
-rw-r--r--  cloudinit/sources/__init__.py | 26
-rw-r--r--  cloudinit/sources/helpers/vultr.py | 101
-rw-r--r--  cloudinit/sources/tests/test_init.py | 17
-rw-r--r--  cloudinit/sources/tests/test_lxd.py | 185
-rw-r--r--  cloudinit/sources/tests/test_oracle.py | 10
-rw-r--r--  cloudinit/ssh_util.py | 8
-rw-r--r--  cloudinit/stages.py | 97
-rw-r--r--  cloudinit/templater.py | 4
-rw-r--r--  cloudinit/tests/test_subp.py | 8
-rw-r--r--  cloudinit/tests/test_util.py | 55
-rw-r--r--  cloudinit/util.py | 24
-rw-r--r--  cloudinit/version.py | 2
-rw-r--r--  config/cloud.cfg.tmpl | 14
-rw-r--r--  debian/changelog | 111
-rw-r--r--  debian/cloud-init.templates | 6
-rw-r--r--  debian/patches/cpick-28e56d99-Azure-Retry-dhcp-on-timeouts-when-polling | 73
-rw-r--r--  debian/patches/cpick-612e3908-Add-connectivity_url-to-Oracle-s-EphemeralDHCPv4-988 | 322
-rw-r--r--  debian/patches/cpick-9c147e83-Allow-disabling-of-network-activation-SC-307-1048 | 190
-rw-r--r--  debian/patches/cpick-dc227869-Set-Azure-to-apply-networking-config-every-BOOT-1023 | 80
-rw-r--r--  debian/patches/cpick-e69a8874-Set-Azure-to-only-update-metadata-on-BOOT_NEW_INSTANCE | 55
-rw-r--r--  debian/patches/series | 5
-rw-r--r--  debian/po/templates.pot | 6
-rw-r--r--  debian/upstream/metadata | 2
-rw-r--r--  doc-requirements.txt | 3
-rw-r--r--  doc/examples/cloud-config-apt.txt | 24
-rw-r--r--  doc/examples/cloud-config-user-groups.txt | 8
-rw-r--r--  doc/man/cloud-id.1 | 2
-rw-r--r--  doc/rtd/index.rst | 1
-rw-r--r--  doc/rtd/topics/bugs.rst | 4
-rw-r--r--  doc/rtd/topics/cloud_tests.rst | 764
-rw-r--r--  doc/rtd/topics/datasources.rst | 1
-rw-r--r--  doc/rtd/topics/datasources/gce.rst | 22
-rw-r--r--  doc/rtd/topics/datasources/lxd.rst | 65
-rw-r--r--  doc/rtd/topics/datasources/opennebula.rst | 8
-rw-r--r--  doc/rtd/topics/datasources/vmware.rst | 1
-rw-r--r--  doc/rtd/topics/modules.rst | 1
-rw-r--r--  doc/rtd/topics/network-config.rst | 11
-rw-r--r--  doc/sources/ovf/example/ovf-env.xml | 2
-rw-r--r--  integration-requirements.txt | 2
-rw-r--r--  packages/redhat/cloud-init.spec.in | 8
-rw-r--r--  packages/suse/cloud-init.spec.in | 1
-rwxr-xr-x  setup.py | 19
-rw-r--r--  systemd/cloud-init-generator.tmpl | 4
-rw-r--r--  systemd/cloud-init.service.tmpl | 4
-rw-r--r--  systemd/disable-sshd-keygen-if-cloud-init-active.conf | 7
-rw-r--r--  templates/hosts.alpine.tmpl | 13
-rw-r--r--  templates/hosts.debian.tmpl | 5
-rw-r--r--  templates/sources.list.debian.tmpl | 4
-rw-r--r--  test-requirements.txt | 1
-rw-r--r--  tests/cloud_tests/__init__.py | 39
-rw-r--r--  tests/cloud_tests/__main__.py | 71
-rw-r--r--  tests/cloud_tests/args.py | 304
-rw-r--r--  tests/cloud_tests/bddeb.py | 119
-rw-r--r--  tests/cloud_tests/collect.py | 219
-rw-r--r--  tests/cloud_tests/config.py | 165
-rw-r--r--  tests/cloud_tests/manage.py | 74
-rw-r--r--  tests/cloud_tests/platforms.yaml | 77
-rw-r--r--  tests/cloud_tests/platforms/__init__.py | 43
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/__init__.py | 0
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/image.py | 116
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/instance.py | 247
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/platform.py | 240
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/regions.json | 42
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/snapshot.py | 58
-rw-r--r--  tests/cloud_tests/platforms/ec2/__init__.py | 0
-rw-r--r--  tests/cloud_tests/platforms/ec2/image.py | 100
-rw-r--r--  tests/cloud_tests/platforms/ec2/instance.py | 132
-rw-r--r--  tests/cloud_tests/platforms/ec2/platform.py | 263
-rw-r--r--  tests/cloud_tests/platforms/ec2/snapshot.py | 66
-rw-r--r--  tests/cloud_tests/platforms/images.py | 56
-rw-r--r--  tests/cloud_tests/platforms/instances.py | 165
-rw-r--r--  tests/cloud_tests/platforms/lxd/__init__.py | 0
-rw-r--r--  tests/cloud_tests/platforms/lxd/image.py | 211
-rw-r--r--  tests/cloud_tests/platforms/lxd/instance.py | 278
-rw-r--r--  tests/cloud_tests/platforms/lxd/platform.py | 104
-rw-r--r--  tests/cloud_tests/platforms/lxd/snapshot.py | 53
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/__init__.py | 0
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/image.py | 79
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/instance.py | 197
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/platform.py | 94
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/snapshot.py | 59
-rw-r--r--  tests/cloud_tests/platforms/platforms.py | 109
-rw-r--r--  tests/cloud_tests/platforms/snapshots.py | 44
-rw-r--r--  tests/cloud_tests/releases.yaml | 381
-rw-r--r--  tests/cloud_tests/run_funcs.py | 75
-rw-r--r--  tests/cloud_tests/setup_image.py | 237
-rw-r--r--  tests/cloud_tests/stage.py | 116
-rw-r--r--  tests/cloud_tests/testcases.yaml | 50
-rw-r--r--  tests/cloud_tests/testcases/__init__.py | 73
-rw-r--r--  tests/cloud_tests/testcases/base.py | 385
-rw-r--r--  tests/cloud_tests/testcases/bugs/README.md | 13
-rw-r--r--  tests/cloud_tests/testcases/bugs/lp1511485.py | 15
-rw-r--r--  tests/cloud_tests/testcases/bugs/lp1511485.yaml | 11
-rw-r--r--  tests/cloud_tests/testcases/bugs/lp1611074.yaml | 8
-rw-r--r--  tests/cloud_tests/testcases/bugs/lp1628337.py | 23
-rw-r--r--  tests/cloud_tests/testcases/bugs/lp1628337.yaml | 23
-rw-r--r--  tests/cloud_tests/testcases/examples/README.md | 12
-rw-r--r--  tests/cloud_tests/testcases/examples/TODO.md | 15
-rw-r--r--  tests/cloud_tests/testcases/examples/add_apt_repositories.py | 20
-rw-r--r--  tests/cloud_tests/testcases/examples/add_apt_repositories.yaml | 23
-rw-r--r--  tests/cloud_tests/testcases/examples/alter_completion_message.py | 40
-rw-r--r--  tests/cloud_tests/testcases/examples/alter_completion_message.yaml | 16
-rw-r--r--  tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py | 27
-rw-r--r--  tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml | 41
-rw-r--r--  tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py | 31
-rw-r--r--  tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml | 63
-rw-r--r--  tests/cloud_tests/testcases/examples/including_user_groups.py | 49
-rw-r--r--  tests/cloud_tests/testcases/examples/including_user_groups.yaml | 56
-rw-r--r--  tests/cloud_tests/testcases/examples/install_arbitrary_packages.py | 20
-rw-r--r--  tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml | 20
-rw-r--r--  tests/cloud_tests/testcases/examples/install_run_chef_recipes.py | 17
-rw-r--r--  tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml | 104
-rw-r--r--  tests/cloud_tests/testcases/examples/run_apt_upgrade.py | 19
-rw-r--r--  tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml | 11
-rw-r--r--  tests/cloud_tests/testcases/examples/run_commands.py | 15
-rw-r--r--  tests/cloud_tests/testcases/examples/run_commands.yaml | 16
-rw-r--r--  tests/cloud_tests/testcases/examples/run_commands_first_boot.py | 15
-rw-r--r--  tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml | 16
-rw-r--r--  tests/cloud_tests/testcases/examples/setup_run_puppet.yaml | 55
-rw-r--r--  tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py | 30
-rw-r--r--  tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml | 45
-rw-r--r--  tests/cloud_tests/testcases/main/README.md | 11
-rw-r--r--  tests/cloud_tests/testcases/main/__init__.py | 8
-rw-r--r--  tests/cloud_tests/testcases/main/command_output_simple.py | 21
-rw-r--r--  tests/cloud_tests/testcases/main/command_output_simple.yaml | 13
-rw-r--r--  tests/cloud_tests/testcases/modules/README.md | 12
-rw-r--r--  tests/cloud_tests/testcases/modules/TODO.md | 95
-rw-r--r--  tests/cloud_tests/testcases/modules/__init__.py | 8
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_conf.py | 20
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_conf.yaml | 21
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml | 20
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_primary.py | 24
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_primary.yaml | 19
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_proxy.py | 22
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml | 18
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_security.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_security.yaml | 18
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_key.py | 23
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml | 50
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py | 23
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml | 23
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_list.py | 31
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml | 28
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py | 23
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml | 29
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_pipelining_disable.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml | 14
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_pipelining_os.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml | 14
-rw-r--r--  tests/cloud_tests/testcases/modules/bootcmd.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/bootcmd.yaml | 13
-rw-r--r--  tests/cloud_tests/testcases/modules/byobu.py | 24
-rw-r--r--  tests/cloud_tests/testcases/modules/byobu.yaml | 17
-rw-r--r--  tests/cloud_tests/testcases/modules/ca_certs.py | 33
-rw-r--r--  tests/cloud_tests/testcases/modules/ca_certs.yaml | 56
-rw-r--r--  tests/cloud_tests/testcases/modules/debug_disable.py | 16
-rw-r--r--  tests/cloud_tests/testcases/modules/debug_disable.yaml | 9
-rw-r--r--  tests/cloud_tests/testcases/modules/debug_enable.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/debug_enable.yaml | 9
-rw-r--r--  tests/cloud_tests/testcases/modules/final_message.py | 40
-rw-r--r--  tests/cloud_tests/testcases/modules/final_message.yaml | 13
-rw-r--r--  tests/cloud_tests/testcases/modules/keys_to_console.py | 22
-rw-r--r--  tests/cloud_tests/testcases/modules/keys_to_console.yaml | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/landscape.yaml | 28
-rw-r--r--  tests/cloud_tests/testcases/modules/locale.py | 30
-rw-r--r--  tests/cloud_tests/testcases/modules/locale.yaml | 22
-rw-r--r--  tests/cloud_tests/testcases/modules/lxd_bridge.py | 36
-rw-r--r--  tests/cloud_tests/testcases/modules/lxd_bridge.yaml | 32
-rw-r--r--  tests/cloud_tests/testcases/modules/lxd_dir.py | 30
-rw-r--r--  tests/cloud_tests/testcases/modules/lxd_dir.yaml | 19
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp.py | 24
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp.yaml | 22
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_chrony.py | 26
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_chrony.yaml | 17
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_pools.py | 34
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_pools.yaml | 32
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_servers.py | 34
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_servers.yaml | 28
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_timesyncd.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/package_update_upgrade_install.py | 36
-rw-r--r--  tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml | 30
-rw-r--r--  tests/cloud_tests/testcases/modules/runcmd.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/runcmd.yaml | 13
-rw-r--r--  tests/cloud_tests/testcases/modules/seed_random_command.yaml | 18
-rw-r--r--  tests/cloud_tests/testcases/modules/seed_random_data.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/seed_random_data.yaml | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/set_hostname.py | 17
-rw-r--r--  tests/cloud_tests/testcases/modules/set_hostname.yaml | 21
-rw-r--r--  tests/cloud_tests/testcases/modules/set_hostname_fqdn.py | 31
-rw-r--r--  tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml | 23
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password.py | 22
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password.yaml | 19
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_expire.py | 23
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_expire.yaml | 32
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_list.py | 12
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_list.yaml | 41
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_list_string.py | 12
-rw-r--r--  tests/cloud_tests/testcases/modules/set_password_list_string.yaml | 41
-rw-r--r--  tests/cloud_tests/testcases/modules/snap.py | 16
-rw-r--r--  tests/cloud_tests/testcases/modules/snap.yaml | 21
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py | 16
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml | 14
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py | 18
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml | 21
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_import_id.py | 17
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_import_id.yaml | 17
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_keys_generate.py | 52
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml | 38
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_keys_provided.py | 58
-rw-r--r--  tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml | 99
-rw-r--r--  tests/cloud_tests/testcases/modules/timezone.py | 15
-rw-r--r--  tests/cloud_tests/testcases/modules/timezone.yaml | 16
-rw-r--r--  tests/cloud_tests/testcases/modules/user_groups.py | 49
-rw-r--r--  tests/cloud_tests/testcases/modules/user_groups.yaml | 55
-rw-r--r--  tests/cloud_tests/testcases/modules/write_files.py | 33
-rw-r--r--  tests/cloud_tests/testcases/modules/write_files.yaml | 53
-rw-r--r--  tests/cloud_tests/util.py | 532
-rw-r--r--  tests/cloud_tests/verify.py | 149
-rw-r--r--  tests/configs/sample1.yaml | 49
-rw-r--r--  tests/integration_tests/bugs/test_gh632.py | 6
-rw-r--r--  tests/integration_tests/bugs/test_gh868.py | 4
-rw-r--r--  tests/integration_tests/bugs/test_lp1813396.py | 1
-rw-r--r--  tests/integration_tests/bugs/test_lp1835584.py | 3
-rw-r--r--  tests/integration_tests/bugs/test_lp1886531.py | 4
-rw-r--r--  tests/integration_tests/bugs/test_lp1898997.py | 4
-rw-r--r--  tests/integration_tests/clouds.py | 11
-rw-r--r--  tests/integration_tests/datasources/test_lxd_discovery.py | 62
-rw-r--r--  tests/integration_tests/datasources/test_network_dependency.py | 32
-rw-r--r--  tests/integration_tests/instances.py | 44
-rw-r--r--  tests/integration_tests/modules/test_apt.py | 62
-rw-r--r--  tests/integration_tests/modules/test_combined.py | 46
-rw-r--r--  tests/integration_tests/modules/test_disk_setup.py | 10
-rw-r--r--  tests/integration_tests/modules/test_hotplug.py | 17
-rw-r--r--  tests/integration_tests/modules/test_jinja_templating.py | 30
-rw-r--r--  tests/integration_tests/modules/test_lxd_bridge.py | 4
-rw-r--r--  tests/integration_tests/modules/test_ntp_servers.py | 8
-rw-r--r--  tests/integration_tests/modules/test_set_password.py | 12
-rw-r--r--  tests/integration_tests/modules/test_ssh_keysfile.py | 10
-rw-r--r--  tests/integration_tests/modules/test_user_events.py | 21
-rw-r--r--  tests/integration_tests/modules/test_version_change.py | 35
-rw-r--r--  tests/integration_tests/modules/test_write_files.py | 21
-rw-r--r--  tests/integration_tests/test_upgrade.py | 35
-rw-r--r--  tests/integration_tests/util.py | 34
-rw-r--r--  tests/unittests/cmd/devel/test_hotplug_hook.py | 24
-rw-r--r--  tests/unittests/test_cli.py | 6
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 123
-rw-r--r--  tests/unittests/test_datasource/test_common.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_configdrive.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_gce.py | 1
-rw-r--r--  tests/unittests/test_datasource/test_opennebula.py | 12
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_ovf.py | 55
-rw-r--r--  tests/unittests/test_datasource/test_scaleway.py | 22
-rw-r--r--  tests/unittests/test_datasource/test_vultr.py | 30
-rw-r--r--  tests/unittests/test_distros/test_create_users.py | 43
-rw-r--r--  tests/unittests/test_distros/test_manage_service.py | 38
-rw-r--r--  tests/unittests/test_ds_identify.py | 24
-rw-r--r--  tests/unittests/test_gpg.py | 81
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py | 19
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py | 60
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_key.py | 137
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v1.py | 75
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v3.py | 111
-rw-r--r--  tests/unittests/test_handler/test_handler_bootcmd.py | 29
-rw-r--r--  tests/unittests/test_handler/test_handler_ca_certs.py | 33
-rw-r--r--  tests/unittests/test_handler/test_handler_chef.py | 25
-rw-r--r--  tests/unittests/test_handler/test_handler_debug.py | 31
-rw-r--r--  tests/unittests/test_handler/test_handler_install_hotplug.py | 113
-rw-r--r--  tests/unittests/test_handler/test_handler_landscape.py | 28
-rw-r--r--  tests/unittests/test_handler/test_handler_locale.py | 41
-rw-r--r--  tests/unittests/test_handler/test_handler_lxd.py | 21
-rw-r--r--  tests/unittests/test_handler/test_handler_mcollective.py | 23
-rw-r--r--  tests/unittests/test_handler/test_handler_ntp.py | 58
-rw-r--r--  tests/unittests/test_handler/test_handler_puppet.py | 125
-rw-r--r--  tests/unittests/test_handler/test_handler_runcmd.py | 56
-rw-r--r--  tests/unittests/test_handler/test_handler_seed_random.py | 46
-rw-r--r--  tests/unittests/test_handler/test_handler_timezone.py | 24
-rw-r--r--  tests/unittests/test_handler/test_handler_write_files.py | 13
-rw-r--r--  tests/unittests/test_handler/test_handler_write_files_deferred.py | 77
-rw-r--r--  tests/unittests/test_handler/test_schema.py | 55
-rw-r--r--  tests/unittests/test_net.py | 74
-rw-r--r--  tests/unittests/test_net_activators.py | 4
-rw-r--r--  tests/unittests/test_net_freebsd.py | 4
-rw-r--r--  tests/unittests/test_sshutil.py | 8
-rw-r--r--  tests/unittests/test_util.py | 2
-rw-r--r--  tests/unittests/util.py | 143
-rw-r--r--  tools/.github-cla-signers | 12
-rw-r--r--  tools/.lp-to-git-user | 1
-rwxr-xr-x  tools/ds-identify | 48
-rwxr-xr-x  tools/hook-hotplug | 6
-rwxr-xr-x  tools/render-cloudcfg | 4
-rwxr-xr-x  tools/run-flake8 (renamed from tools/run-pyflakes) | 4
-rwxr-xr-x  tools/run-pep8 | 21
-rw-r--r--  tox.ini | 22
-rw-r--r--  udev/10-cloud-init-hook-hotplug.rules | 6
360 files changed, 3901 insertions, 12078 deletions
diff --git a/.gitignore b/.gitignore
index eb26e0da..1be358d2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,6 +19,7 @@ stage
.pytest_cache/
.vscode/
htmlcov/
+tags
# Ignore packaging artifacts
cloud-init.dsc
diff --git a/.pylintrc b/.pylintrc
index 94a81d0e..3edb0092 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -24,8 +24,9 @@ jobs=4
# W0631(undefined-loop-variable)
# W0703(broad-except)
# W1401(anomalous-backslash-in-string)
+# W1514(unspecified-encoding)
-disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401
+disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514
[REPORTS]
diff --git a/.travis.yml b/.travis.yml
index e112789a..1582e829 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -131,7 +131,7 @@ matrix:
env: TOXENV=flake8
- python: 3.6
env: TOXENV=pylint
- - python: 3.6
+ - python: 3.7
env: TOXENV=doc
# Test all supported Python versions (but at the end, so we schedule
# longer-running jobs first)
diff --git a/ChangeLog b/ChangeLog
index 6de07ad3..219c46fb 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,101 @@
+21.4
+ - Azure: fallback nic needs to be reevaluated during reprovisioning
+ (#1094) [Anh Vo]
+ - azure: pps imds (#1093) [Anh Vo]
+ - testing: Remove calls to 'install_new_cloud_init' (#1092)
+ - Add LXD datasource (#1040)
+ - Fix unhandled apt_configure case. (#1065) [Brett Holman]
+ - Allow libexec for hotplug (#1088)
+ - Add necessary mocks to test_ovf unit tests (#1087)
+ - Remove (deprecated) apt-key (#1068) [Brett Holman] (LP: #1836336)
+ - distros: Remove a completed "TODO" comment (#1086)
+ - cc_ssh.py: Add configuration for controlling ssh-keygen output (#1083)
+ [dermotbradley]
+ - Add "install hotplug" module (SC-476) (#1069) (LP: #1946003)
+ - hosts.alpine.tmpl: rearrange the order of short and long hostnames
+ (#1084) [dermotbradley]
+ - Add max version to docutils
+ - cloudinit/dmi.py: Change warning to debug to prevent console display
+ (#1082) [dermotbradley]
+ - remove unnecessary EOF string in
+ disable-sshd-keygen-if-cloud-init-active.conf (#1075) [Emanuele
+ Giuseppe Esposito]
+ - Add module 'write-files-deferred' executed in stage 'final' (#916)
+ [Lucendio]
+ - Bump pycloudlib to fix CI (#1080)
+ - Remove pin in dependencies for jsonschema (#1078)
+ - Add "Google" as possible system-product-name (#1077) [vteratipally]
+ - Update Debian security suite for bullseye (#1076) [Johann Queuniet]
+ - Leave the details of service management to the distro (#1074)
+ [Andy Fiddaman]
+ - Fix typos in setup.py (#1059) [Christian Clauss]
+ - Update Azure _unpickle (SC-500) (#1067) (LP: #1946644)
+ - cc_ssh.py: fix private key group owner and permissions (#1070)
+ [Emanuele Giuseppe Esposito]
+ - VMware: read network-config from ISO (#1066) [Thomas Weißschuh]
+ - testing: mock sleep in gce unit tests (#1072)
+ - CloudStack: fix data-server DNS resolution (#1004)
+ [Olivier Lemasle] (LP: #1942232)
+ - Fix unit test broken by pyyaml upgrade (#1071)
+ - testing: add get_cloud function (SC-461) (#1038)
+ - Inhibit sshd-keygen@.service if cloud-init is active (#1028)
+ [Ryan Harper]
+ - VMWARE: search the deployPkg plugin in multiarch dir (#1061)
+ [xiaofengw-vmware] (LP: #1944946)
+ - Fix set-name/interface DNS bug (#1058) [Andrew Kutz] (LP: #1946493)
+ - Use specified tmp location for growpart (#1046) [jshen28]
+ - .gitignore: ignore tags file for ctags users (#1057) [Brett Holman]
+ - Allow comments in runcmd and report failed commands correctly (#1049)
+ [Brett Holman] (LP: #1853146)
+ - tox integration: pass the *_proxy, GOOGLE_*, GCP_* env vars (#1050)
+ [Paride Legovini]
+ - Allow disabling of network activation (SC-307) (#1048) (LP: #1938299)
+ - renderer: convert relative imports to absolute (#1052) [Paride Legovini]
+ - Support ETHx_IP6_GATEWAY, SET_HOSTNAME on OpenNebula (#1045)
+ [Vlastimil Holer]
+ - integration-requirements: bump the pycloudlib commit (#1047)
+ [Paride Legovini]
+ - Allow Vultr to set MTU and use as-is configs (#1037) [eb3095]
+ - pin jsonschema in requirements.txt (#1043)
+ - testing: remove cloud_tests (#1020)
+ - Add andgein as contributor (#1042) [Andrew Gein]
+ - Make wording for module frequency consistent (#1039) [Nicolas Bock]
+ - Use ascii code for growpart (#1036) [jshen28]
+ - Add jshen28 as contributor (#1035) [jshen28]
+ - Skip test_cache_purged_on_version_change on Azure (#1033)
+ - Remove invalid ssh_import_id from examples (#1031)
+ - Cleanup Vultr support (#987) [eb3095]
+ - docs: update cc_disk_setup for fs to raw disk (#1017)
+ - HACKING.rst: change contact info to James Falcon (#1030)
+ - tox: bump the pinned flake8 and pylint version (#1029)
+ [Paride Legovini] (LP: #1944414)
+ - Add retries to DataSourceGCE.py when connecting to GCE (#1005)
+ [vteratipally]
+ - Set Azure to apply networking config every BOOT (#1023)
+ - Add connectivity_url to Oracle's EphemeralDHCPv4 (#988) (LP: #1939603)
+ - docs: fix typo and include sudo for report bugs commands (#1022)
+ [Renan Rodrigo] (LP: #1940236)
+ - VMware: Fix typo introduced in #947 and add test (#1019) [PengpengSun]
+ - Update IPv6 entries in /etc/hosts (#1021) [Richard Hansen] (LP: #1943798)
+ - Integration test upgrades for the 21.3-1 SRU (#1001)
+ - Add Jille to tools/.github-cla-signers (#1016) [Jille Timmermans]
+ - Improve ug_util.py (#1013) [Shreenidhi Shedi]
+ - Support openEuler OS (#1012) [zhuzaifangxuele]
+ - ssh_utils.py: ignore when sshd_config options are not key/value pairs
+ (#1007) [Emanuele Giuseppe Esposito]
+ - Set Azure to only update metadata on BOOT_NEW_INSTANCE (#1006)
+ - cc_update_etc_hosts: Use the distribution-defined path for the hosts
+ file (#983) [Andy Fiddaman]
+ - Add CloudLinux OS support (#1003) [Alexandr Kravchenko]
+ - puppet config: add the start_agent option (#1002) [Andrew Bogott]
+ - Fix `make style-check` errors (#1000) [Shreenidhi Shedi]
+ - Make cloud-id copyright year (#991) [Andrii Podanenko]
+ - Add support to accept-ra in networkd renderer (#999) [Shreenidhi Shedi]
+ - Update ds-identify to pass shellcheck (#979) [Andrew Kutz]
+ - Azure: Retry dhcp on timeouts when polling reprovisiondata (#998)
+ [aswinrajamannar]
+ - testing: Fix ssh keys integration test (#992)
+
21.3
- Azure: During primary nic detection, check interface status continuously
before rebinding again (#990) [aswinrajamannar]
diff --git a/HACKING.rst b/HACKING.rst
index fc858672..6b7dae5a 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -23,17 +23,17 @@ Follow these steps to submit your first pull request to cloud-init:
* ensure that you fill in the GitHub username field.
* when prompted for 'Project contact' or 'Canonical Project
- Manager', enter 'Rick Harding'.
+ Manager', enter 'James Falcon'.
* If your company has signed the CLA for you, please contact us to
help in verifying which Launchpad/GitHub accounts are associated
with the company.
- * For any questions or help with the process, please email `Rick
- Harding <mailto:rick.harding@canonical.com>`_ with the subject,
+ * For any questions or help with the process, please email `James
+ Falcon <mailto:james.falcon@canonical.com>`_ with the subject,
"Cloud-Init CLA"
- * You also may contact user ``rick_h`` in the ``#cloud-init``
+ * You also may contact user ``falcojr`` in the ``#cloud-init``
channel on the Libera IRC network.
* Configure git with your email and name for commit messages.
diff --git a/Makefile b/Makefile
index 5fb0fcbf..0c015dae 100644
--- a/Makefile
+++ b/Makefile
@@ -18,13 +18,10 @@ all: check
check: check_version test yaml
-style-check: pep8 $(pyflakes)
+style-check: flake8
-pep8:
- @$(CWD)/tools/run-pep8
-
-pyflakes:
- @$(CWD)/tools/run-pyflakes
+flake8:
+ @$(CWD)/tools/run-flake8
unittest: clean_pyc
python3 -m pytest -v tests/unittests cloudinit
@@ -86,6 +83,6 @@ deb-src:
doc:
tox -e doc
-.PHONY: test pyflakes clean pep8 rpm srpm deb deb-src yaml
+.PHONY: test flake8 clean rpm srpm deb deb-src yaml
.PHONY: check_version pip-test-requirements pip-requirements clean_pyc
.PHONY: unittest style-check doc
diff --git a/README.md b/README.md
index 5828c2fa..e96541ef 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/cloud-tests-requirements.txt b/cloud-tests-requirements.txt
deleted file mode 100644
index eecab63e..00000000
--- a/cloud-tests-requirements.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-# PyPI requirements for cloud-init cloud tests
-# https://cloudinit.readthedocs.io/en/latest/topics/cloud_tests.html
-#
-# Note: Changes to this requirements may require updates to
-# the packages/pkg-deps.json file as well.
-#
-
-# ec2 backend
-boto3==1.14.53
-
-# ssh communication
-paramiko==2.7.2
-cryptography==3.2
-
-# lxd backend
-pylxd==2.2.11
-
-# finds latest image information
-git+https://git.launchpad.net/simplestreams
-
-# azure backend
-azure-storage==0.36.0
-msrestazure==0.6.1
-azure-common==1.1.23
-azure-mgmt-compute==7.0.0
-azure-mgmt-network==5.0.0
-azure-mgmt-resource==4.0.0
-azure-mgmt-storage==6.0.0
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index a0058f03..f6f36a00 100644
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -8,12 +8,14 @@ import time
from cloudinit import log
from cloudinit import reporting
+from cloudinit import stages
from cloudinit.event import EventScope, EventType
from cloudinit.net import activators, read_sys_net_safe
from cloudinit.net.network_state import parse_net_config_data
from cloudinit.reporting import events
from cloudinit.stages import Init
-from cloudinit.sources import DataSource, DataSourceNotFoundException
+from cloudinit.sources import DataSource # noqa: F401
+from cloudinit.sources import DataSourceNotFoundException
LOG = log.getLogger(__name__)
@@ -163,7 +165,9 @@ def is_enabled(hotplug_init, subsystem):
subsystem)
) from e
- return hotplug_init.update_event_enabled(
+ return stages.update_event_enabled(
+ datasource=hotplug_init.datasource,
+ cfg=hotplug_init.cfg,
event_source_type=EventType.HOTPLUG,
scope=scope
)
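
For context, update_event_enabled now lives in cloudinit.stages as a standalone helper rather than a method on Init, so any caller holding a datasource and a merged config can gate on an event. A minimal sketch of the new call shape (the hotplug_allowed wrapper is illustrative, not part of the patch):

    from cloudinit import stages
    from cloudinit.event import EventScope, EventType

    def hotplug_allowed(datasource, cfg):
        # True only when HOTPLUG is supported by the datasource and
        # enabled for the network scope in the effective configuration.
        return stages.update_event_enabled(
            datasource=datasource,
            cfg=cfg,
            event_source_type=EventType.HOTPLUG,
            scope=EventScope.NETWORK,
        )
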
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 1de1de99..63186d34 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -239,6 +239,12 @@ def purge_cache_on_python_version_change(init):
util.write_file(python_version_path, current_python_version)
+def _should_bring_up_interfaces(init, args):
+ if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'):
+ return False
+ return not args.local
+
+
def main_init(name, args):
deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
if args.local:
@@ -348,6 +354,7 @@ def main_init(name, args):
util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
# Stage 5
+ bring_up_interfaces = _should_bring_up_interfaces(init, args)
try:
init.fetch(existing=existing)
# if in network mode, and the datasource is local
@@ -367,7 +374,7 @@ def main_init(name, args):
util.logexc(LOG, ("No instance datasource found!"
" Likely bad things to come!"))
if not args.force:
- init.apply_network_config(bring_up=not args.local)
+ init.apply_network_config(bring_up=bring_up_interfaces)
LOG.debug("[%s] Exiting without datasource", mode)
if mode == sources.DSMODE_LOCAL:
return (None, [])
@@ -388,7 +395,7 @@ def main_init(name, args):
# dhcp clients to advertize this hostname to any DDNS services
# LP: #1746455.
_maybe_set_hostname(init, stage='local', retry_stage='network')
- init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
+ init.apply_network_config(bring_up=bring_up_interfaces)
if mode == sources.DSMODE_LOCAL:
if init.datasource.dsmode != mode:
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index 1f5975b0..2e380848 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -4,6 +4,9 @@ from collections import namedtuple
import copy
import os
from io import StringIO
+from unittest import mock
+
+import pytest
from cloudinit.cmd import main
from cloudinit import safeyaml
@@ -162,4 +165,24 @@ class TestMain(FilesystemMockingTestCase):
for log in expected_logs:
self.assertIn(log, self.stderr.getvalue())
+
+class TestShouldBringUpInterfaces:
+ @pytest.mark.parametrize('cfg_disable,args_local,expected', [
+ (True, True, False),
+ (True, False, False),
+ (False, True, False),
+ (False, False, True),
+ ])
+ def test_should_bring_up_interfaces(
+ self, cfg_disable, args_local, expected
+ ):
+ init = mock.Mock()
+ init.cfg = {'disable_network_activation': cfg_disable}
+
+ args = mock.Mock()
+ args.local = args_local
+
+ result = main._should_bring_up_interfaces(init, args)
+ assert result == expected
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 0c9c7925..86d0feae 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -11,6 +11,7 @@
import glob
import os
import re
+import pathlib
from textwrap import dedent
from cloudinit.config.schema import (
@@ -27,18 +28,22 @@ LOG = logging.getLogger(__name__)
# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+APT_LOCAL_KEYS = '/etc/apt/trusted.gpg'
+APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/'
+CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/'
+
frequency = PER_INSTANCE
distros = ["ubuntu", "debian"]
mirror_property = {
'type': 'array',
- 'item': {
+ 'items': {
'type': 'object',
'additionalProperties': False,
'required': ['arches'],
'properties': {
'arches': {
'type': 'array',
- 'item': {
+ 'items': {
'type': 'string'
},
'minItems': 1
@@ -49,7 +54,7 @@ mirror_property = {
},
'search': {
'type': 'array',
- 'item': {
+ 'items': {
'type': 'string',
'format': 'uri'
},
@@ -108,11 +113,12 @@ schema = {
search:
- 'http://cool.but-sometimes-unreachable.com/ubuntu'
- 'http://us.archive.ubuntu.com/ubuntu'
- search_dns: <true/false>
+ search_dns: false
- arches:
- s390x
- arm64
uri: 'http://archive-to-use-for-arm64.example.com/ubuntu'
+
security:
- arches:
- default
@@ -139,7 +145,7 @@ schema = {
source1:
keyid: 'keyid'
keyserver: 'keyserverurl'
- source: 'deb http://<url>/ xenial main'
+ source: 'deb [signed-by=$KEY_FILE] http://<url>/ xenial main'
source2:
source: 'ppa:<ppa-name>'
source3:
@@ -255,7 +261,8 @@ schema = {
``http://archive.ubuntu.com/ubuntu``.
- ``security`` => \
``http://security.ubuntu.com/ubuntu``
- """)},
+ """)
+ },
'security': {
**mirror_property,
'description': dedent("""\
@@ -312,7 +319,8 @@ schema = {
- ``$MIRROR``
- ``$RELEASE``
- ``$PRIMARY``
- - ``$SECURITY``""")
+ - ``$SECURITY``
+ - ``$KEY_FILE``""")
},
'conf': {
'type': 'string',
@@ -374,6 +382,7 @@ schema = {
- ``key``: a raw PGP key.
- ``keyserver``: alternate keyserver to pull \
``keyid`` key from.
+ - ``filename``: specify the name of the .list file
The ``source`` key supports variable
replacements for the following strings:
@@ -381,7 +390,8 @@ schema = {
- ``$MIRROR``
- ``$PRIMARY``
- ``$SECURITY``
- - ``$RELEASE``""")
+ - ``$RELEASE``
+ - ``$KEY_FILE``""")
}
}
}
@@ -683,7 +693,7 @@ def add_mirror_keys(cfg, target):
"""Adds any keys included in the primary/security mirror clauses"""
for key in ('primary', 'security'):
for mirror in cfg.get(key, []):
- add_apt_key(mirror, target)
+ add_apt_key(mirror, target, file_name=key)
def generate_sources_list(cfg, release, mirrors, cloud):
@@ -714,20 +724,21 @@ def generate_sources_list(cfg, release, mirrors, cloud):
util.write_file(aptsrc, disabled, mode=0o644)
-def add_apt_key_raw(key, target=None):
+def add_apt_key_raw(key, file_name, hardened=False, target=None):
"""
actual adding of a key as defined in key argument
to the system
"""
LOG.debug("Adding key:\n'%s'", key)
try:
- subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
+ name = pathlib.Path(file_name).stem
+ return apt_key('add', output_file=name, data=key, hardened=hardened)
except subp.ProcessExecutionError:
LOG.exception("failed to add apt GPG Key to apt keyring")
raise
-def add_apt_key(ent, target=None):
+def add_apt_key(ent, target=None, hardened=False, file_name=None):
"""
Add key to the system as defined in ent (if any).
Supports raw keys or keyid's
@@ -741,7 +752,10 @@ def add_apt_key(ent, target=None):
ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver)
if 'key' in ent:
- add_apt_key_raw(ent['key'], target)
+ return add_apt_key_raw(
+ ent['key'],
+ file_name or ent['filename'],
+ hardened=hardened)
def update_packages(cloud):
@@ -751,9 +765,28 @@ def update_packages(cloud):
def add_apt_sources(srcdict, cloud, target=None, template_params=None,
aa_repo_match=None):
"""
- add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srcdict'. When rendering template, also
- include the values in dictionary searchList
+ install keys and repo source .list files defined in 'sources'
+
+ for each 'source' entry in the config:
+ 1. expand template variables and write source .list file in
+ /etc/apt/sources.list.d/
+ 2. install defined keys
+ 3. update packages via distro-specific method (i.e. apt-key update)
+
+
+ @param srcdict: a dict containing elements required
+ @param cloud: cloud instance object
+
+ Example srcdict value:
+ {
+ 'rio-grande-repo': {
+ 'source': 'deb [signed-by=$KEY_FILE] $MIRROR $RELEASE main',
+ 'keyid': 'B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77',
+ 'keyserver': 'pgp.mit.edu'
+ }
+ }
+
+ Note: Deb822 format is not supported
"""
if template_params is None:
template_params = {}
@@ -770,7 +803,11 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
if 'filename' not in ent:
ent['filename'] = filename
- add_apt_key(ent, target)
+ if 'source' in ent and '$KEY_FILE' in ent['source']:
+ key_file = add_apt_key(ent, target, hardened=True)
+ template_params['KEY_FILE'] = key_file
+ else:
+ key_file = add_apt_key(ent, target)
if 'source' not in ent:
continue
@@ -1006,7 +1043,7 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):
# select the specification matching the target arch
default = None
for mirror_cfg_elem in mirror_cfg_list:
- arches = mirror_cfg_elem.get("arches")
+ arches = mirror_cfg_elem.get("arches") or []
if arch in arches:
return mirror_cfg_elem
if "default" in arches:
@@ -1089,6 +1126,81 @@ def apply_apt_config(cfg, proxy_fname, config_fname):
LOG.debug("no apt config configured, removed %s", config_fname)
+def apt_key(command, output_file=None, data=None, hardened=False,
+ human_output=True):
+ """apt-key replacement
+
+ commands implemented: 'add', 'list', 'finger'
+
+ @param output_file: name of output gpg file (without .gpg or .asc)
+ @param data: key contents
+ @param human_output: list keys formatted for human parsing
+ @param hardened: write keys to /etc/apt/cloud-init.gpg.d/ (referred to
+ with [signed-by] in sources file)
+ """
+
+ def _get_key_files():
+ """return all apt keys
+
+ /etc/apt/trusted.gpg (if it exists) and all keyfiles (and symlinks to
+ keyfiles) in /etc/apt/trusted.gpg.d/ are returned
+
+ based on apt-key implementation
+ """
+ key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else []
+
+ for file in os.listdir(APT_TRUSTED_GPG_DIR):
+ if file.endswith('.gpg') or file.endswith('.asc'):
+ key_files.append(APT_TRUSTED_GPG_DIR + file)
+ return key_files if key_files else ''
+
+ def apt_key_add():
+ """apt-key add <file>
+
+ returns filepath to new keyring, or '/dev/null' when an error occurs
+ """
+ file_name = '/dev/null'
+ if not output_file:
+ util.logexc(
+ LOG, 'Unknown filename, failed to add key: "{}"'.format(data))
+ else:
+ try:
+ key_dir = \
+ CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR
+ stdout = gpg.dearmor(data)
+ file_name = '{}{}.gpg'.format(key_dir, output_file)
+ util.write_file(file_name, stdout)
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, 'Gpg error, failed to add key: {}'.format(
+ data))
+ except UnicodeDecodeError:
+ util.logexc(LOG, 'Decode error, failed to add key: {}'.format(
+ data))
+ return file_name
+
+ def apt_key_list():
+ """apt-key list
+
+ returns string of all trusted keys (in /etc/apt/trusted.gpg and
+ /etc/apt/trusted.gpg.d/)
+ """
+ key_list = []
+ for key_file in _get_key_files():
+ try:
+ key_list.append(gpg.list(key_file, human_output=human_output))
+ except subp.ProcessExecutionError as error:
+ LOG.warning('Failed to list key "%s": %s', key_file, error)
+ return '\n'.join(key_list)
+
+ if command == 'add':
+ return apt_key_add()
+ elif command == 'finger' or command == 'list':
+ return apt_key_list()
+ else:
+ raise ValueError(
+ 'apt_key() commands add, list, and finger are currently supported')
+
+
CONFIG_CLEANERS = {
'cloud-init': clean_cloud_init,
}
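
Taken together with the gpg helpers, the new apt_key() function replaces the deprecated apt-key binary: 'add' dearmors key material into /etc/apt/trusted.gpg.d/ (or /etc/apt/cloud-init.gpg.d/ when hardened, for use with [signed-by=...] source entries), while 'list' and 'finger' enumerate the trusted keyrings. A minimal usage sketch under those assumptions; the key text and output_file name are placeholders:

    from cloudinit.config.cc_apt_configure import apt_key

    # Dearmor the key into /etc/apt/trusted.gpg.d/example.gpg and return
    # that path ('/dev/null' on failure). hardened=True would place it in
    # /etc/apt/cloud-init.gpg.d/ instead.
    key_path = apt_key('add', output_file='example',
                       data='<ascii-armored public key>')

    # Human-readable dump of all trusted keys, mirroring `apt-key list`.
    print(apt_key('list'))
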
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index dff93245..61c769b3 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -17,7 +17,7 @@ by default.
**Internal name:** ``cc_disable_ec2_metadata``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 3ec49ca5..440f05f1 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -50,15 +50,18 @@ filesystem on may be specified either as a path or as an alias in the format
``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device.
The partition can also be specified by setting ``partition`` to the desired
partition number. The ``partition`` option may also be set to ``auto``, in
-which this module will search for the existance of a filesystem matching the
+which this module will search for the existence of a filesystem matching the
``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip
creating the filesystem if one is found. The ``partition`` option may also be
set to ``any``, in which case any file system that matches ``type`` and
``device`` will cause this module to skip filesystem creation for the
``fs_setup`` entry, regardless of ``label`` matching or not. To write a
-filesystem directly to a device, use ``partition: none``. A label can be
-specified for the filesystem using ``label``, and the filesystem type can be
-specified using ``filesystem``.
+filesystem directly to a device, use ``partition: none``. ``partition: none``
+will **always** write the filesystem, even when the ``label`` and
+``filesystem`` are matched, and ``overwrite`` is ``false``.
+
+A label can be specified for the filesystem using
+``label``, and the filesystem type can be specified using ``filesystem``.
.. note::
If specifying device using the ``<device name>.<partition number>`` format,
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index b1d99f97..40eee052 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -16,7 +16,7 @@ user configuration should be required.
**Internal name:** ``cc_emit_upstart``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** ubuntu, debian
"""
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 77984bca..91f50e22 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -52,35 +52,26 @@ BUILTIN_CFG = {
}
-def stop_update_start(service, config_file, content, systemd=False):
- if systemd:
- cmds = {'stop': ['systemctl', 'stop', service],
- 'start': ['systemctl', 'start', service],
- 'enable': ['systemctl', 'enable', service]}
- else:
- cmds = {'stop': ['service', 'stop'],
- 'start': ['service', 'start']}
-
- def run(cmd, msg):
- try:
- return subp.subp(cmd, capture=True)
- except subp.ProcessExecutionError as e:
- LOG.warning("failed: %s (%s): %s", service, cmd, e)
- return False
-
- stop_failed = not run(cmds['stop'], msg='stop %s' % service)
+def stop_update_start(distro, service, config_file, content):
+ try:
+ distro.manage_service('stop', service)
+ stop_failed = False
+ except subp.ProcessExecutionError as e:
+ stop_failed = True
+ LOG.warning("failed to stop %s: %s", service, e)
+
if not content.endswith('\n'):
content += '\n'
util.write_file(config_file, content, omode="w")
- ret = run(cmds['start'], msg='start %s' % service)
- if ret and stop_failed:
- LOG.warning("success: %s started", service)
-
- if 'enable' in cmds:
- ret = run(cmds['enable'], msg='enable %s' % service)
+ try:
+ distro.manage_service('start', service)
+ if stop_failed:
+ LOG.warning("success: %s started", service)
+ except subp.ProcessExecutionError as e:
+ LOG.warning("failed to start %s: %s", service, e)
- return ret
+ distro.manage_service('enable', service)
def handle(name, cfg, cloud, log, args):
@@ -99,7 +90,8 @@ def handle(name, cfg, cloud, log, args):
distro.install_packages(['ubuntu-fan'])
stop_update_start(
+ distro,
service='ubuntu-fan', config_file=mycfg.get('config_path'),
- content=mycfg.get('config'), systemd=distro.uses_systemd())
+ content=mycfg.get('config'))
# vi: ts=4 expandtab
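
cc_ntp and cc_rsyslog receive the same treatment later in this patch: instead of each module guessing between systemctl and service invocations, they call distro.manage_service(). A simplified sketch of what that helper has to do (the real implementation in cloudinit/distros/__init__.py also maps actions such as 'try-reload' onto init-system-specific verbs):

    from cloudinit import subp

    def manage_service(self, action, service):
        # Translate a generic action into the init system's command and
        # run it; ProcessExecutionError propagates to callers such as
        # stop_update_start() above.
        if self.uses_systemd():
            cmd = ['systemctl', action, service]
        else:
            cmd = ['service', service, action]
        return subp.subp(cmd, capture=True)
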
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index 3441f7a9..4fa5297e 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -21,7 +21,7 @@ specified as a jinja template with the following variables set:
**Internal name:** ``cc_final_message``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 9f5525a1..1ddc9dc7 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -50,7 +50,7 @@ growpart is::
**Internal name:** ``cc_growpart``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -72,6 +72,7 @@ import stat
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
from cloudinit import subp
+from cloudinit import temp_utils
from cloudinit import util
frequency = PER_ALWAYS
@@ -142,21 +143,32 @@ class ResizeGrowPart(object):
return False
def resize(self, diskdev, partnum, partdev):
+ myenv = os.environ.copy()
+ myenv['LANG'] = 'C'
before = get_size(partdev)
- try:
- subp.subp(["growpart", '--dry-run', diskdev, partnum])
- except subp.ProcessExecutionError as e:
- if e.exit_code != 1:
- util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
- diskdev, partnum)
- raise ResizeFailedException(e) from e
- return (before, before)
- try:
- subp.subp(["growpart", diskdev, partnum])
- except subp.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e) from e
+ # growpart uses tmp dir to store intermediate states
+ # and may conflict with systemd-tmpfiles-clean
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ growpart_tmp = os.path.join(tmpd, "growpart")
+ if not os.path.exists(growpart_tmp):
+ os.mkdir(growpart_tmp, 0o700)
+ myenv['TMPDIR'] = growpart_tmp
+ try:
+ subp.subp(["growpart", '--dry-run', diskdev, partnum],
+ env=myenv)
+ except subp.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
+ diskdev, partnum)
+ raise ResizeFailedException(e) from e
+ return (before, before)
+
+ try:
+ subp.subp(["growpart", diskdev, partnum], env=myenv)
+ except subp.ProcessExecutionError as e:
+ util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
+ raise ResizeFailedException(e) from e
return (before, get_size(partdev))
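
The growpart fix is environment injection: growpart stores intermediate state under TMPDIR, and pointing TMPDIR at a short-lived, cloud-init-owned directory keeps systemd-tmpfiles-clean from deleting that state mid-resize. The same pattern in isolation (the wrapper and its command argument are illustrative):

    import os
    from cloudinit import subp, temp_utils

    def run_with_private_tmp(cmd):
        env = os.environ.copy()
        env['LANG'] = 'C'
        # needs_exe=True picks a location where helper scripts may run,
        # since /tmp can be mounted noexec.
        with temp_utils.tempdir(needs_exe=True) as tmpd:
            env['TMPDIR'] = tmpd
            return subp.subp(cmd, env=env)
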
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
new file mode 100644
index 00000000..da98c409
--- /dev/null
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -0,0 +1,140 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Install hotplug udev rules if supported and enabled"""
+import os
+from textwrap import dedent
+
+from cloudinit import util
+from cloudinit import subp
+from cloudinit import stages
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.event import EventType, EventScope
+from cloudinit.settings import PER_INSTANCE
+
+
+frequency = PER_INSTANCE
+distros = [ALL_DISTROS]
+
+schema = {
+ "id": "cc_install_hotplug",
+ "name": "Install Hotplug",
+ "title": "Install hotplug if supported and enabled",
+ "description": dedent("""\
+ This module will install the udev rules to enable hotplug if
+ supported by the datasource and enabled in the userdata. The udev
+ rules will be installed as
+ ``/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules``.
+
+ When hotplug is enabled, newly added network devices will be added
+ to the system by cloud-init. After udev detects the event,
+ cloud-init will referesh the instance metadata from the datasource,
+ detect the device in the updated metadata, then apply the updated
+ network configuration.
+
+ Currently supported datasources: Openstack, EC2
+ """),
+ "distros": distros,
+ "examples": [
+ dedent("""\
+ # Enable hotplug of network devices
+ updates:
+ network:
+ when: ["hotplug"]
+ """),
+ dedent("""\
+ # Enable network hotplug alongside boot event
+ updates:
+ network:
+ when: ["boot", "hotplug"]
+ """),
+ ],
+ "frequency": frequency,
+ "type": "object",
+ "properties": {
+ "updates": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "network": {
+ "type": "object",
+ "required": ["when"],
+ "additionalProperties": False,
+ "properties": {
+ "when": {
+ "type": "array",
+ "additionalProperties": False,
+ "items": {
+ "type": "string",
+ "additionalProperties": False,
+ "enum": [
+ "boot-new-instance",
+ "boot-legacy",
+ "boot",
+ "hotplug",
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema)
+
+
+HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
+HOTPLUG_UDEV_RULES_TEMPLATE = """\
+# Installed by cloud-init due to network hotplug userdata
+ACTION!="add|remove", GOTO="cloudinit_end"
+LABEL="cloudinit_hook"
+SUBSYSTEM=="net", RUN+="{libexecdir}/hook-hotplug"
+LABEL="cloudinit_end"
+"""
+
+
+def handle(_name, cfg, cloud, log, _args):
+ validate_cloudconfig_schema(cfg, schema)
+ network_hotplug_enabled = (
+ 'updates' in cfg and
+ 'network' in cfg['updates'] and
+ 'when' in cfg['updates']['network'] and
+ 'hotplug' in cfg['updates']['network']['when']
+ )
+ hotplug_supported = EventType.HOTPLUG in (
+ cloud.datasource.get_supported_events(
+ [EventType.HOTPLUG]).get(EventScope.NETWORK, set())
+ )
+ hotplug_enabled = stages.update_event_enabled(
+ datasource=cloud.datasource,
+ cfg=cfg,
+ event_source_type=EventType.HOTPLUG,
+ scope=EventScope.NETWORK,
+ )
+ if not (hotplug_supported and hotplug_enabled):
+ if os.path.exists(HOTPLUG_UDEV_PATH):
+ log.debug("Uninstalling hotplug, not enabled")
+ util.del_file(HOTPLUG_UDEV_PATH)
+ subp.subp(["udevadm", "control", "--reload-rules"])
+ elif network_hotplug_enabled:
+ log.warning(
+ "Hotplug is unsupported by current datasource. "
+ "Udev rules will NOT be installed."
+ )
+ else:
+ log.debug("Skipping hotplug install, not enabled")
+ return
+ if not subp.which("udevadm"):
+ log.debug("Skipping hotplug install, udevadm not found")
+ return
+
+ # This may need to turn into a distro property at some point
+ libexecdir = "/usr/libexec/cloud-init"
+ if not os.path.exists(libexecdir):
+ libexecdir = "/usr/lib/cloud-init"
+ util.write_file(
+ filename=HOTPLUG_UDEV_PATH,
+ content=HOTPLUG_UDEV_RULES_TEMPLATE.format(libexecdir=libexecdir),
+ )
+ subp.subp(["udevadm", "control", "--reload-rules"])
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 3995704a..79bcc27d 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -17,7 +17,7 @@ false`` in config.
**Internal name:** ``cc_migrator``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 7c371a49..c3aee798 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -24,9 +24,9 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
-distros = ['almalinux', 'alpine', 'centos', 'debian', 'eurolinux', 'fedora',
- 'opensuse', 'photon', 'rhel', 'rocky', 'sles', 'ubuntu',
- 'virtuozzo']
+distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian',
+ 'eurolinux', 'fedora', 'openEuler', 'opensuse', 'photon',
+ 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo']
NTP_CLIENT_CONFIG = {
'chrony': {
@@ -473,21 +473,6 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None,
util.del_file(template_fn)
-def reload_ntp(service, systemd=False):
- """Restart or reload an ntp system service.
-
- @param service: A string specifying the name of the service to be affected.
- @param systemd: A boolean indicating if the distro uses systemd, defaults
- to False.
- @returns: A tuple of stdout, stderr results from executing the action.
- """
- if systemd:
- cmd = ['systemctl', 'reload-or-restart', service]
- else:
- cmd = ['service', service, 'restart']
- subp.subp(cmd, capture=True)
-
-
def supplemental_schema_validation(ntp_config):
"""Validate user-provided ntp:config option values.
@@ -596,10 +581,11 @@ def handle(name, cfg, cloud, log, _args):
packages=ntp_client_config['packages'],
check_exe=ntp_client_config['check_exe'])
try:
- reload_ntp(ntp_client_config['service_name'],
- systemd=cloud.distro.uses_systemd())
+ cloud.distro.manage_service('reload',
+ ntp_client_config.get('service_name'))
except subp.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index a0779eb0..dc20fc44 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -59,10 +59,13 @@ Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR
attributes and certificate extension requests.
See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
-The puppet service will be automatically enabled after installation. A manual
-run can also be triggered by setting ``exec`` to ``true``, and additional
-arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by
-default the agent will execute with the ``--test`` flag).
+By default, the puppet service will be automatically enabled after installation
+and set to automatically start on boot. To override this in favor of manual
+puppet execution set ``start_service`` to ``false``.
+
+A single manual run can be triggered by setting ``exec`` to ``true``, and
+additional arguments can be passed to ``puppet agent`` via the ``exec_args``
+key (by default the agent will execute with the ``--test`` flag).
**Internal name:** ``cc_puppet``
@@ -85,6 +88,7 @@ default the agent will execute with the ``--test`` flag).
package_name: 'puppet'
exec: <true/false>
exec_args: ['--test']
+ start_service: <true/false>
conf:
agent:
server: "puppetserver.example.org"
@@ -197,6 +201,9 @@ def handle(name, cfg, cloud, log, _args):
puppet_cfg, 'install_type', 'packages')
cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True)
run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False)
+ start_puppetd = util.get_cfg_option_bool(puppet_cfg,
+ 'start_service',
+ default=True)
aio_install_url = util.get_cfg_option_str(
puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL)
@@ -291,7 +298,8 @@ def handle(name, cfg, cloud, log, _args):
default_flow_style=False))
# Set it up so it autostarts
- _autostart_puppet(log)
+ if start_puppetd:
+ _autostart_puppet(log)
# Run the agent if needed
if run:
@@ -312,7 +320,8 @@ def handle(name, cfg, cloud, log, _args):
cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
subp.subp(cmd, capture=False)
- # Start puppetd
- subp.subp(['service', 'puppet', 'start'], capture=False)
+ if start_puppetd:
+ # Start puppetd
+ subp.subp(['service', 'puppet', 'start'], capture=False)
# vi: ts=4 expandtab
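As a usage sketch of the new ``start_service`` flag (key names from the documentation above; values illustrative), a configuration that triggers a single agent run while leaving the service disabled is read as follows::

    from cloudinit import util

    puppet_cfg = {'exec': True, 'start_service': False}
    run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False)
    start_puppetd = util.get_cfg_option_bool(puppet_cfg, 'start_service',
                                             default=True)
    # With these values the agent runs once and neither _autostart_puppet()
    # nor the final 'service puppet start' is executed.
    assert run and not start_puppetd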
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 146758ad..d5e0ecb2 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -28,7 +28,7 @@ This module handles
**Internal name:** ``cc_refresh_rmc_and_interface``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** RHEL
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 2a2bc931..dd2bbd00 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -207,16 +207,11 @@ HOST_PORT_RE = re.compile(
r'([:](?P<port>[0-9]+))?$')
-def reload_syslog(command=DEF_RELOAD, systemd=False):
- service = 'rsyslog'
+def reload_syslog(distro, command=DEF_RELOAD):
if command == DEF_RELOAD:
- if systemd:
- cmd = ['systemctl', 'reload-or-try-restart', service]
- else:
- cmd = ['service', service, 'restart']
- else:
- cmd = command
- subp.subp(cmd, capture=True)
+ service = distro.get_option('rsyslog_svcname', 'rsyslog')
+ return distro.manage_service('try-reload', service)
+ return subp.subp(command, capture=True)
def load_config(cfg):
@@ -429,9 +424,7 @@ def handle(name, cfg, cloud, log, _args):
return
try:
- restarted = reload_syslog(
- command=mycfg[KEYNAME_RELOAD],
- systemd=cloud.distro.uses_systemd()),
+ restarted = reload_syslog(cloud.distro, command=mycfg[KEYNAME_RELOAD])
except subp.ProcessExecutionError as e:
restarted = False
log.warning("Failed to reload syslog", e)
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 1f75d6c5..15960c7d 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -65,7 +65,8 @@ schema = {
'items': {
'oneOf': [
{'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
+ {'type': 'string'},
+ {'type': 'null'}]
},
'additionalItems': False, # Reject items of non-string non-list
'additionalProperties': False,
@@ -90,7 +91,7 @@ def handle(name, cfg, cloud, log, _args):
try:
content = util.shellify(cmd)
util.write_file(out_fn, content, 0o700)
- except Exception:
- util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
+ except Exception as e:
+ raise type(e)('Failed to shellify {} into file {}'.format(cmd, out_fn))
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 1e3f419e..1bf3f508 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -17,7 +17,7 @@ module does not accept any config keys.
**Internal name:** ``cc_scripts_per_boot``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
"""
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 5a59dc32..a96bcc18 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -34,7 +34,7 @@ based on initial hostname.
**Internal name:** ``cc_set_hostname``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 433de751..3843aaf7 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -94,18 +94,15 @@ PW_SET = (''.join([x for x in ascii_letters + digits
if x not in 'loLOI01']))
-def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
+def handle_ssh_pwauth(pw_auth, distro):
"""Apply sshd PasswordAuthentication changes.
@param pw_auth: config setting from 'pw_auth'.
Best given as True, False, or "unchanged".
- @param service_cmd: The service command list (['service'])
- @param service_name: The name of the sshd service for the system.
+ @param distro: an instance of the distro class for the target distribution
@return: None"""
cfg_name = "PasswordAuthentication"
- if service_cmd is None:
- service_cmd = ["service"]
if util.is_true(pw_auth):
cfg_val = 'yes'
@@ -124,16 +121,12 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
- if 'systemctl' in service_cmd:
- cmd = list(service_cmd) + ["restart", service_name]
- else:
- cmd = list(service_cmd) + [service_name, "restart"]
- subp.subp(cmd)
+ distro.manage_service('restart', distro.get_option('ssh_svcname', 'ssh'))
LOG.debug("Restarted the SSH daemon.")
def handle(_name, cfg, cloud, log, args):
- if len(args) != 0:
+ if args:
# if run from command line, and give args, wipe the chpasswd['list']
password = args[0]
if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
@@ -229,9 +222,7 @@ def handle(_name, cfg, cloud, log, args):
if expired_users:
log.debug("Expired passwords for: %s users", expired_users)
- handle_ssh_pwauth(
- cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd,
- service_name=cloud.distro.get_option('ssh_svcname', 'ssh'))
+ handle_ssh_pwauth(cfg.get('ssh_pwauth'), cloud.distro)
if len(errors):
log.debug("%s errors occured, re-raising the last one", len(errors))
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 05a16dbc..1053ab67 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -17,7 +17,7 @@ keys.
Authorized Keys
^^^^^^^^^^^^^^^
-Authorized keys are a list of public SSH keys that are allowed to connect to a
+Authorized keys are a list of public SSH keys that are allowed to connect to
a user account on a system. They are stored in `.ssh/authorized_keys` in that
account's home directory. Authorized keys for the default user defined in
``users`` can be specified using ``ssh_authorized_keys``. Keys
@@ -89,6 +89,10 @@ optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``,
key types. Not all key types have to be specified, ones left unspecified will
not be used. If this config option is used, then no keys will be generated.
+When host keys are generated, the output of the ssh-keygen command(s) can be
+suppressed on the console using the ``ssh_quiet_keygen`` configuration key.
+This setting defaults to False, which displays the keygen output.
+
.. note::
when specifying private host keys in cloud-config, care should be taken to
ensure that the communication between the data source and the instance is
@@ -151,6 +155,7 @@ config flags are:
ssh_publish_hostkeys:
enabled: <true/false> (Defaults to true)
blacklist: <list of key types> (Defaults to [dsa])
+ ssh_quiet_keygen: <true/false>
"""
import glob
@@ -239,7 +244,16 @@ def handle(_name, cfg, cloud, log, _args):
with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
out, err = subp.subp(cmd, capture=True, env=lang_c)
- sys.stdout.write(util.decode_binary(out))
+ if not util.get_cfg_option_bool(cfg, 'ssh_quiet_keygen',
+ False):
+ sys.stdout.write(util.decode_binary(out))
+
+ gid = util.get_group_id("ssh_keys")
+ if gid != -1:
+ # perform same "sanitize permissions" as sshd-keygen
+ os.chown(keyfile, -1, gid)
+ os.chmod(keyfile, 0o640)
+ os.chmod(keyfile + ".pub", 0o644)
except subp.ProcessExecutionError as e:
err = util.decode_binary(e.stderr).lower()
if (e.exit_code == 1 and
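A minimal sketch of the new option's effect (only the ``ssh_quiet_keygen`` key is taken from the documentation above; the rest is illustrative)::

    from cloudinit import util

    cfg = {'ssh_quiet_keygen': True}
    if not util.get_cfg_option_bool(cfg, 'ssh_quiet_keygen', False):
        # Only reached when ssh_quiet_keygen is false or absent (the
        # default), matching the guarded stdout write above.
        print('ssh-keygen output would be echoed here')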
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 03fffb96..32368bbb 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -9,27 +9,28 @@
"""
Update Etc Hosts
----------------
-**Summary:** update ``/etc/hosts``
+**Summary:** update the hosts file (usually ``/etc/hosts``)
-This module will update the contents of ``/etc/hosts`` based on the
-hostname/fqdn specified in config. Management of ``/etc/hosts`` is controlled
-using ``manage_etc_hosts``. If this is set to false, cloud-init will not manage
-``/etc/hosts`` at all. This is the default behavior.
+This module will update the contents of the local hosts database (hosts file;
+usually ``/etc/hosts``) based on the hostname/fqdn specified in config.
+Management of the hosts file is controlled using ``manage_etc_hosts``. If this
+is set to false, cloud-init will not manage the hosts file at all. This is the
+default behavior.
-If set to ``true`` or ``template``, cloud-init will generate ``/etc/hosts``
+If set to ``true`` or ``template``, cloud-init will generate the hosts file
using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the
``/etc/cloud/templates/hosts.tmpl`` template, the strings ``$hostname`` and
``$fqdn`` will be replaced with the hostname and fqdn respectively.
If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not
-rewrite ``/etc/hosts`` entirely, but rather will ensure that a entry for the
-fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
-``ping <hostname>`` will ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
+rewrite the hosts file entirely, but rather will ensure that an entry for the
+fqdn with a distribution dependent ip is present (i.e. ``ping <hostname>`` will
+ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
.. note::
if ``manage_etc_hosts`` is set ``true`` or ``template``, the contents
- of ``/etc/hosts`` will be updated every boot. to make any changes to
- ``/etc/hosts`` persistant they must be made in
+ of the hosts file will be updated every boot. To make any changes to
+ the hosts file persistent they must be made in
``/etc/cloud/templates/hosts.tmpl``
.. note::
@@ -38,7 +39,7 @@ fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
**Internal name:** ``cc_update_etc_hosts``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
@@ -59,6 +60,9 @@ frequency = PER_ALWAYS
def handle(name, cfg, cloud, log, _args):
manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
+
+ hosts_fn = cloud.distro.hosts_fn
+
if util.translate_bool(manage_hosts, addons=['template']):
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
@@ -74,7 +78,7 @@ def handle(name, cfg, cloud, log, _args):
" found for distro %s") %
(cloud.distro.osfamily))
- templater.render_to_file(tpl_fn_name, '/etc/hosts',
+ templater.render_to_file(tpl_fn_name, hosts_fn,
{'hostname': hostname, 'fqdn': fqdn})
elif manage_hosts == "localhost":
@@ -84,10 +88,10 @@ def handle(name, cfg, cloud, log, _args):
" but no hostname was found"))
return
- log.debug("Managing localhost in /etc/hosts")
+ log.debug("Managing localhost in %s", hosts_fn)
cloud.distro.update_etc_hosts(hostname, fqdn)
else:
log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing /etc/hosts in module %s"), name)
+ " not managing %s in module %s"), hosts_fn, name)
# vi: ts=4 expandtab
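A sketch of how the three documented ``manage_etc_hosts`` modes branch in ``handle()`` above (assuming ``util.translate_bool`` accepts these values)::

    from cloudinit import util

    for manage_hosts in ('template', True, 'localhost', False):
        if util.translate_bool(manage_hosts, addons=['template']):
            pass  # render hosts.tmpl to cloud.distro.hosts_fn
        elif manage_hosts == 'localhost':
            pass  # only ensure an entry for the fqdn is present
        else:
            pass  # default: leave the hosts file unmanaged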
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index f4120356..370de73a 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -20,7 +20,7 @@ is set, then the hostname will not be altered.
**Internal name:** ``cc_update_hostname``
-**Module frequency:** per always
+**Module frequency:** always
**Supported distros:** all
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 8601e707..41c75fa2 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -21,6 +21,7 @@ frequency = PER_INSTANCE
DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
+DEFAULT_DEFER = False
UNKNOWN_ENC = 'text/plain'
LOG = logging.getLogger(__name__)
@@ -90,6 +91,24 @@ schema = {
# Create an empty file on the system
write_files:
- path: /root/CLOUD_INIT_WAS_HERE
+ """),
+ dedent("""\
+ # Defer writing the file until after the package (Nginx) is
+ # installed and its user is created alongside
+ write_files:
+ - path: /etc/nginx/conf.d/example.com.conf
+ content: |
+ server {
+ server_name example.com;
+ listen 80;
+ root /var/www;
+ location / {
+ try_files $uri $uri/ $uri.html =404;
+ }
+ }
+ owner: 'nginx:nginx'
+ permissions: '0640'
+ defer: true
""")],
'frequency': frequency,
'type': 'object',
@@ -151,6 +170,15 @@ schema = {
``path`` exists. Default: **false**.
"""),
},
+ 'defer': {
+ 'type': 'boolean',
+ 'default': DEFAULT_DEFER,
+ 'description': dedent("""\
+                    Defer writing the file until the 'final' stage, after
+                    users have been created and packages have been installed.
+ Default: **{defer}**.
+ """.format(defer=DEFAULT_DEFER)),
+ },
},
'required': ['path'],
'additionalProperties': False
@@ -163,13 +191,18 @@ __doc__ = get_schema_doc(schema) # Supplement python help()
def handle(name, cfg, _cloud, log, _args):
- files = cfg.get('write_files')
- if not files:
+ validate_cloudconfig_schema(cfg, schema)
+ file_list = cfg.get('write_files', [])
+ filtered_files = [
+ f for f in file_list if not util.get_cfg_option_bool(f,
+ 'defer',
+ DEFAULT_DEFER)
+ ]
+ if not filtered_files:
log.debug(("Skipping module named %s,"
" no/empty 'write_files' key in configuration"), name)
return
- validate_cloudconfig_schema(cfg, schema)
- write_files(name, files)
+ write_files(name, filtered_files)
def canonicalize_extraction(encoding_type):
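A sketch of the split introduced here: this module now keeps only the non-deferred entries, while ``cc_write_files_deferred`` (added below) takes the complement (file dicts illustrative)::

    from cloudinit import util

    file_list = [{'path': '/tmp/now.txt'},
                 {'path': '/tmp/later.txt', 'defer': True}]
    immediate = [f for f in file_list
                 if not util.get_cfg_option_bool(f, 'defer', False)]
    deferred = [f for f in file_list
                if util.get_cfg_option_bool(f, 'defer', False)]
    assert [f['path'] for f in immediate] == ['/tmp/now.txt']
    assert [f['path'] for f in deferred] == ['/tmp/later.txt']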
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
new file mode 100644
index 00000000..0c75aa22
--- /dev/null
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2021 Canonical Ltd.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Defer writing certain files"""
+
+from textwrap import dedent
+
+from cloudinit.config.schema import validate_cloudconfig_schema
+from cloudinit import util
+from cloudinit.config.cc_write_files import (
+ schema as write_files_schema, write_files, DEFAULT_DEFER)
+
+
+schema = util.mergemanydict([
+ {
+ 'id': 'cc_write_files_deferred',
+ 'name': 'Write Deferred Files',
+ 'title': dedent("""\
+        write certain files, whose creation has been deferred, during
+        the final stage
+ """),
+ 'description': dedent("""\
+ This module is based on `'Write Files' <write-files>`__, and
+        will handle all files from the write_files list that have been
+        marked as deferred and thus are not processed by the
+        write-files module.
+
+        *Please note that this module is not exposed to the user through
+ its own dedicated top-level directive.*
+ """)
+ },
+ write_files_schema
+])
+
+# Not exposed, because related modules should document this behaviour
+__doc__ = None
+
+
+def handle(name, cfg, _cloud, log, _args):
+ validate_cloudconfig_schema(cfg, schema)
+ file_list = cfg.get('write_files', [])
+ filtered_files = [
+ f for f in file_list if util.get_cfg_option_bool(f,
+ 'defer',
+ DEFAULT_DEFER)
+ ]
+ if not filtered_files:
+ log.debug(("Skipping module named %s,"
+ " no deferred file defined in configuration"), name)
+ return
+ write_files(name, filtered_files)
+
+
+# vi: ts=4 expandtab
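The schema definition above relies on ``util.mergemanydict`` keeping the first dict's keys on conflict, so the module's own id/name/title/description override the inherited write_files schema; a toy sketch of that assumption::

    from cloudinit import util

    base = {'id': 'cc_write_files', 'properties': {'defer': {}}}
    override = {'id': 'cc_write_files_deferred'}
    merged = util.mergemanydict([override, base])
    assert merged['id'] == 'cc_write_files_deferred'
    assert 'defer' in merged['properties']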
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index b7a48dcc..d66d3ae4 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -16,10 +16,10 @@ entry, the config entry will be skipped.
**Internal name:** ``cc_yum_add_repo``
-**Module frequency:** per always
+**Module frequency:** always
-**Supported distros:** almalinux, centos, eurolinux, fedora, photon, rhel,
- rocky, virtuozzo
+**Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora,
+ openEuler, photon, rhel, rocky, virtuozzo
**Config keys**::
@@ -37,8 +37,8 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['almalinux', 'centos', 'eurolinux', 'fedora', 'photon', 'rhel',
- 'rocky', 'virtuozzo']
+distros = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora',
+ 'openEuler', 'photon', 'rhel', 'rocky', 'virtuozzo']
def _canonicalize_id(repo_id):
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
index 45a06c22..aff110e5 100644
--- a/cloudinit/config/tests/test_resolv_conf.py
+++ b/cloudinit/config/tests/test_resolv_conf.py
@@ -2,7 +2,7 @@ import pytest
from unittest import mock
from cloudinit.config.cc_resolv_conf import generate_resolv_conf
-from tests.unittests.test_distros.test_create_users import MyBaseDistro
+from tests.unittests.util import TestingDistro
EXPECTED_HEADER = """\
# Your system has been configured with 'manage-resolv-conf' set to true.
@@ -14,7 +14,7 @@ EXPECTED_HEADER = """\
class TestGenerateResolvConf:
- dist = MyBaseDistro()
+ dist = TestingDistro()
tmpl_fn = "templates/resolv.conf.tmpl"
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index bbe2ee8f..79118a12 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -14,57 +14,52 @@ class TestHandleSshPwauth(CiTestCase):
with_logs = True
- @mock.patch(MODPATH + "subp.subp")
+ @mock.patch("cloudinit.distros.subp.subp")
def test_unknown_value_logs_warning(self, m_subp):
- setpass.handle_ssh_pwauth("floo")
+ cloud = self.tmp_cloud(distro='ubuntu')
+ setpass.handle_ssh_pwauth("floo", cloud.distro)
self.assertIn("Unrecognized value: ssh_pwauth=floo",
self.logs.getvalue())
m_subp.assert_not_called()
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
+ @mock.patch("cloudinit.distros.subp.subp")
def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
- setpass.handle_ssh_pwauth(
- True, service_cmd=["systemctl"], service_name="myssh")
- self.assertEqual(mock.call(["systemctl", "restart", "myssh"]),
- m_subp.call_args)
-
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
- def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
- """If systemctl in service cmd: systemctl restart name."""
- setpass.handle_ssh_pwauth(
- True, service_cmd=["service"], service_name="myssh")
- self.assertEqual(mock.call(["service", "myssh", "restart"]),
- m_subp.call_args)
+ cloud = self.tmp_cloud(distro='ubuntu')
+ cloud.distro.init_cmd = ['systemctl']
+ setpass.handle_ssh_pwauth(True, cloud.distro)
+ m_subp.assert_called_with(
+ ["systemctl", "restart", "ssh"], capture=True)
@mock.patch(MODPATH + "update_ssh_config", return_value=False)
- @mock.patch(MODPATH + "subp.subp")
+ @mock.patch("cloudinit.distros.subp.subp")
def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
"""If config is not updated, then no system restart should be done."""
- setpass.handle_ssh_pwauth(True)
+ cloud = self.tmp_cloud(distro='ubuntu')
+ setpass.handle_ssh_pwauth(True, cloud.distro)
m_subp.assert_not_called()
self.assertIn("No need to restart SSH", self.logs.getvalue())
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
+ @mock.patch("cloudinit.distros.subp.subp")
def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
"""If 'unchanged', then no updates to config and no restart."""
- setpass.handle_ssh_pwauth(
- "unchanged", service_cmd=["systemctl"], service_name="myssh")
+ cloud = self.tmp_cloud(distro='ubuntu')
+ setpass.handle_ssh_pwauth("unchanged", cloud.distro)
m_update_ssh_config.assert_not_called()
m_subp.assert_not_called()
- @mock.patch(MODPATH + "subp.subp")
+ @mock.patch("cloudinit.distros.subp.subp")
def test_valid_change_values(self, m_subp):
"""If value is a valid changen value, then update should be called."""
+ cloud = self.tmp_cloud(distro='ubuntu')
upname = MODPATH + "update_ssh_config"
optname = "PasswordAuthentication"
for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
optval = "yes" if value in util.TRUE_STRINGS else "no"
with mock.patch(upname, return_value=False) as m_update:
- setpass.handle_ssh_pwauth(value)
+ setpass.handle_ssh_pwauth(value, cloud.distro)
m_update.assert_called_with({optname: optval})
m_subp.assert_not_called()
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index a634623a..cf6aad14 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -16,7 +16,7 @@ import stat
import string
import urllib.parse
from io import StringIO
-from typing import Any, Mapping
+from typing import Any, Mapping # noqa: F401
from cloudinit import importer
from cloudinit import log as logging
@@ -49,8 +49,8 @@ OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'freebsd': ['freebsd'],
'gentoo': ['gentoo'],
- 'redhat': ['almalinux', 'amazon', 'centos', 'eurolinux', 'fedora',
- 'photon', 'rhel', 'rocky', 'virtuozzo'],
+ 'redhat': ['almalinux', 'amazon', 'centos', 'cloudlinux', 'eurolinux',
+ 'fedora', 'openEuler', 'photon', 'rhel', 'rocky', 'virtuozzo'],
'suse': ['opensuse', 'sles'],
}
@@ -227,8 +227,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Now try to bring them up
if bring_up:
+ LOG.debug('Bringing up newly configured network interfaces')
network_activator = activators.select_activator()
network_activator.bring_up_all_interfaces(network_state)
+ else:
+ LOG.debug("Not bringing up newly configured network interfaces")
return False
def apply_network_config_names(self, netconfig):
@@ -795,6 +798,34 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
args.append(message)
return args
+ def manage_service(self, action, service):
+ """
+ Perform the requested action on a service. This handles the common
+ 'systemctl' and 'service' cases and may be overridden in subclasses
+ as necessary.
+ May raise ProcessExecutionError
+ """
+ init_cmd = self.init_cmd
+ if self.uses_systemd() or 'systemctl' in init_cmd:
+ init_cmd = ['systemctl']
+ cmds = {'stop': ['stop', service],
+ 'start': ['start', service],
+ 'enable': ['enable', service],
+ 'restart': ['restart', service],
+ 'reload': ['reload-or-restart', service],
+ 'try-reload': ['reload-or-try-restart', service],
+ }
+ else:
+ cmds = {'stop': [service, 'stop'],
+ 'start': [service, 'start'],
+ 'enable': [service, 'start'],
+ 'restart': [service, 'restart'],
+ 'reload': [service, 'restart'],
+ 'try-reload': [service, 'restart'],
+ }
+ cmd = list(init_cmd) + list(cmds[action])
+ return subp.subp(cmd, capture=True)
+
def _apply_hostname_transformations_to_url(url: str, transformations: list):
"""
@@ -848,7 +879,7 @@ def _sanitize_mirror_url(url: str):
* Converts it to its IDN form (see below for details)
* Replaces any non-Letters/Digits/Hyphen (LDH) characters in it with
hyphens
- * TODO: Remove any leading/trailing hyphens from each domain name label
+ * Removes any leading/trailing hyphens from each domain name label
Before we replace any invalid domain name characters, we first need to
ensure that any valid non-ASCII characters in the hostname will not be
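A usage sketch of the new ``Distro.manage_service()`` helper (assuming ``distro`` is a ``cloud.distro`` instance; service names illustrative), showing how one action name maps to different command shapes::

    distro.manage_service('restart', 'ssh')
    # systemd:  ['systemctl', 'restart', 'ssh']
    # sysvinit: ['service', 'ssh', 'restart']

    distro.manage_service('try-reload', 'rsyslog')
    # systemd:  ['systemctl', 'reload-or-try-restart', 'rsyslog']
    # sysvinit: ['service', 'rsyslog', 'restart']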
diff --git a/tests/cloud_tests/testcases/bugs/__init__.py b/cloudinit/distros/cloudlinux.py
index c6452f9c..edb3165d 100644
--- a/tests/cloud_tests/testcases/bugs/__init__.py
+++ b/cloudinit/distros/cloudlinux.py
@@ -1,8 +1,9 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""Test verifiers for cloud-init bugs.
+from cloudinit.distros import rhel
-See configs/bugs/README.md for more information
-"""
+
+class Distro(rhel.Distro):
+ pass
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/__init__.py b/cloudinit/distros/openEuler.py
index 39af88c2..edb3165d 100644
--- a/tests/cloud_tests/testcases/examples/__init__.py
+++ b/cloudinit/distros/openEuler.py
@@ -1,8 +1,9 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""Test verifiers for cloud-init examples.
+from cloudinit.distros import rhel
-See configs/examples/README.md for more information
-"""
+
+class Distro(rhel.Distro):
+ pass
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index 08446a95..600b743f 100755
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -16,77 +16,59 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-# Normalizes a input group configuration
-# which can be a comma seperated list of
-# group names, or a list of group names
-# or a python dictionary of group names
-# to a list of members of that group.
+# Normalizes an input group configuration, which can be a comma separated
+# string, a list, or a dictionary
#
-# The output is a dictionary of group
-# names => members of that group which
-# is the standard form used in the rest
-# of cloud-init
+# Returns dictionary of group names => members of that group which is the
+# standard form used in the rest of cloud-init
def _normalize_groups(grp_cfg):
if isinstance(grp_cfg, str):
- grp_cfg = grp_cfg.strip().split(",")
+ grp_cfg = grp_cfg.strip().split(',')
+
if isinstance(grp_cfg, list):
c_grp_cfg = {}
for i in grp_cfg:
if isinstance(i, dict):
for k, v in i.items():
- if k not in c_grp_cfg:
- if isinstance(v, list):
- c_grp_cfg[k] = list(v)
- elif isinstance(v, str):
- c_grp_cfg[k] = [v]
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
+ if not isinstance(v, (list, str)):
+ raise TypeError('Bad group member type %s'
+ % (type_utils.obj_name(v)))
+
+ if isinstance(v, list):
+ c_grp_cfg.setdefault(k, []).extend(v)
else:
- if isinstance(v, list):
- c_grp_cfg[k].extend(v)
- elif isinstance(v, str):
- c_grp_cfg[k].append(v)
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
+ c_grp_cfg.setdefault(k, []).append(v)
elif isinstance(i, str):
if i not in c_grp_cfg:
c_grp_cfg[i] = []
else:
- raise TypeError("Unknown group name type %s" %
- type_utils.obj_name(i))
+ raise TypeError('Unknown group name type %s'
+ % (type_utils.obj_name(i)))
grp_cfg = c_grp_cfg
+
groups = {}
if isinstance(grp_cfg, dict):
- for (grp_name, grp_members) in grp_cfg.items():
+ for grp_name, grp_members in grp_cfg.items():
groups[grp_name] = util.uniq_merge_sorted(grp_members)
else:
- raise TypeError(("Group config must be list, dict "
- " or string types only and not %s") %
- type_utils.obj_name(grp_cfg))
+ raise TypeError(('Group config must be list, dict or string type only '
+ 'but found %s') % (type_utils.obj_name(grp_cfg)))
return groups
-# Normalizes a input group configuration
-# which can be a comma seperated list of
-# user names, or a list of string user names
-# or a list of dictionaries with components
-# that define the user config + 'name' (if
-# a 'name' field does not exist then the
-# default user is assumed to 'own' that
-# configuration.
+# Normalizes an input user configuration, which can be a comma separated
+# string of user names, a list of names or config dicts, or a dictionary
#
-# The output is a dictionary of user
-# names => user config which is the standard
-# form used in the rest of cloud-init. Note
-# the default user will have a special config
-# entry 'default' which will be marked as true
-# all other users will be marked as false.
+# Config dicts hold the user config components plus 'name' (if a 'name' field
+# does not exist then the default user is assumed to 'own' that configuration)
+#
+# Returns a dictionary of user names => user config which is the standard form
+# used in the rest of cloud-init. Note the default user will have a special
+# config entry 'default' which will be marked true and all other users will be
+# marked false.
def _normalize_users(u_cfg, def_user_cfg=None):
if isinstance(u_cfg, dict):
ad_ucfg = []
- for (k, v) in u_cfg.items():
+ for k, v in u_cfg.items():
if isinstance(v, (bool, int, float, str)):
if util.is_true(v):
ad_ucfg.append(str(k))
@@ -94,8 +76,8 @@ def _normalize_users(u_cfg, def_user_cfg=None):
v['name'] = k
ad_ucfg.append(v)
else:
- raise TypeError(("Unmappable user value type %s"
- " for key %s") % (type_utils.obj_name(v), k))
+ raise TypeError(('Unmappable user value type %s for key %s')
+ % (type_utils.obj_name(v), k))
u_cfg = ad_ucfg
elif isinstance(u_cfg, str):
u_cfg = util.uniq_merge_sorted(u_cfg)
@@ -107,181 +89,147 @@ def _normalize_users(u_cfg, def_user_cfg=None):
if u and u not in users:
users[u] = {}
elif isinstance(user_config, dict):
- if 'name' in user_config:
- n = user_config.pop('name')
- prev_config = users.get(n) or {}
- users[n] = util.mergemanydict([prev_config,
- user_config])
- else:
- # Assume the default user then
- prev_config = users.get('default') or {}
- users['default'] = util.mergemanydict([prev_config,
- user_config])
+ n = user_config.pop('name', 'default')
+ prev_config = users.get(n) or {}
+ users[n] = util.mergemanydict([prev_config, user_config])
else:
- raise TypeError(("User config must be dictionary/list "
- " or string types only and not %s") %
- type_utils.obj_name(user_config))
+ raise TypeError(('User config must be dictionary/list or string '
+                         'types only and not %s')
+ % (type_utils.obj_name(user_config)))
# Ensure user options are in the right python friendly format
if users:
c_users = {}
- for (uname, uconfig) in users.items():
+ for uname, uconfig in users.items():
c_uconfig = {}
- for (k, v) in uconfig.items():
+ for k, v in uconfig.items():
k = k.replace('-', '_').strip()
if k:
c_uconfig[k] = v
c_users[uname] = c_uconfig
users = c_users
- # Fixup the default user into the real
- # default user name and replace it...
+    # Replace the 'default' entry with the actual default user name.
def_user = None
if users and 'default' in users:
def_config = users.pop('default')
if def_user_cfg:
- # Pickup what the default 'real name' is
- # and any groups that are provided by the
- # default config
+ # Pickup what the default 'real name' is and any groups that are
+ # provided by the default config
def_user_cfg = def_user_cfg.copy()
def_user = def_user_cfg.pop('name')
def_groups = def_user_cfg.pop('groups', [])
- # Pickup any config + groups for that user name
- # that we may have previously extracted
+ # Pick any config + groups for the user name that we may have
+ # extracted previously
parsed_config = users.pop(def_user, {})
parsed_groups = parsed_config.get('groups', [])
- # Now merge our extracted groups with
- # anything the default config provided
+ # Now merge the extracted groups with the default config provided
users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
- parsed_config['groups'] = ",".join(users_groups)
- # The real config for the default user is the
- # combination of the default user config provided
- # by the distro, the default user config provided
- # by the above merging for the user 'default' and
- # then the parsed config from the user's 'real name'
- # which does not have to be 'default' (but could be)
- users[def_user] = util.mergemanydict([def_user_cfg,
- def_config,
+ parsed_config['groups'] = ','.join(users_groups)
+ # The real config for the default user is the combination of the
+ # default user config provided by the distro, the default user
+ # config provided by the above merging for the user 'default' and
+ # then the parsed config from the user's 'real name' which does not
+ # have to be 'default' (but could be)
+ users[def_user] = util.mergemanydict([def_user_cfg, def_config,
parsed_config])
- # Ensure that only the default user that we
- # found (if any) is actually marked as being
- # the default user
- if users:
- for (uname, uconfig) in users.items():
- if def_user and uname == def_user:
- uconfig['default'] = True
- else:
- uconfig['default'] = False
+ # Ensure that only the default user that we found (if any) is actually
+ # marked as the default user
+ for uname, uconfig in users.items():
+ uconfig['default'] = (uname == def_user if def_user else False)
return users
-# Normalizes a set of user/users and group
-# dictionary configuration into a useable
-# format that the rest of cloud-init can
-# understand using the default user
-# provided by the input distrobution (if any)
-# to allow for mapping of the 'default' user.
+# Normalizes a set of user/users and group dictionary configuration into a
+# usable format that the rest of cloud-init can understand, using the default
+# user provided by the input distribution (if any) to allow mapping of the
+# 'default' user.
#
# Output is a dictionary of group names -> [member] (list)
# and a dictionary of user names -> user configuration (dict)
#
-# If 'user' exists it will override
-# the 'users'[0] entry (if a list) otherwise it will
-# just become an entry in the returned dictionary (no override)
+# If 'user' exists, it will override the 'users'[0] entry (if a list);
+# otherwise it will just become an entry in the returned dictionary (no
+# override)
def normalize_users_groups(cfg, distro):
if not cfg:
cfg = {}
- users = {}
- groups = {}
- if 'groups' in cfg:
- groups = _normalize_groups(cfg['groups'])
-
# Handle the previous style of doing this where the first user
# overrides the concept of the default user if provided in the user: XYZ
# format.
old_user = {}
if 'user' in cfg and cfg['user']:
old_user = cfg['user']
- # Translate it into the format that is more useful
- # going forward
+ # Translate it into a format that will be more useful going forward
if isinstance(old_user, str):
- old_user = {
- 'name': old_user,
- }
- if not isinstance(old_user, dict):
+ old_user = {'name': old_user}
+ elif not isinstance(old_user, dict):
LOG.warning(("Format for 'user' key must be a string or dictionary"
" and not %s"), type_utils.obj_name(old_user))
old_user = {}
- # If no old user format, then assume the distro
- # provides what the 'default' user maps to, but notice
- # that if this is provided, we won't automatically inject
- # a 'default' user into the users list, while if a old user
- # format is provided we will.
+ # If no old user format, then assume the distro provides what the 'default'
+ # user maps to, but notice that if this is provided, we won't automatically
+ # inject a 'default' user into the users list, while if an old user format
+ # is provided we will.
distro_user_config = {}
try:
distro_user_config = distro.get_default_user()
except NotImplementedError:
- LOG.warning(("Distro has not implemented default user "
- "access. No distribution provided default user"
- " will be normalized."))
+ LOG.warning(('Distro has not implemented default user access. No '
+ 'distribution provided default user will be normalized.'))
- # Merge the old user (which may just be an empty dict when not
- # present with the distro provided default user configuration so
- # that the old user style picks up all the distribution specific
- # attributes (if any)
+ # Merge the old user (which may just be an empty dict when not present)
+ # with the distro provided default user configuration so that the old user
+ # style picks up all the distribution specific attributes (if any)
default_user_config = util.mergemanydict([old_user, distro_user_config])
base_users = cfg.get('users', [])
if not isinstance(base_users, (list, dict, str)):
LOG.warning(("Format for 'users' key must be a comma separated string"
- " or a dictionary or a list and not %s"),
+ " or a dictionary or a list but found %s"),
type_utils.obj_name(base_users))
base_users = []
if old_user:
- # Ensure that when user: is provided that this user
- # always gets added (as the default user)
+    # When 'user:' is provided, it should be added as the default user
if isinstance(base_users, list):
- # Just add it on at the end...
base_users.append({'name': 'default'})
elif isinstance(base_users, dict):
base_users['default'] = dict(base_users).get('default', True)
elif isinstance(base_users, str):
- # Just append it on to be re-parsed later
- base_users += ",default"
+ base_users += ',default'
+
+ groups = {}
+ if 'groups' in cfg:
+ groups = _normalize_groups(cfg['groups'])
users = _normalize_users(base_users, default_user_config)
return (users, groups)
-# Given a user dictionary config it will
-# extract the default user name and user config
-# from that list and return that tuple or
-# return (None, None) if no default user is
-# found in the given input
+# Given a user dictionary config, extract the default user name and user config
+# and return them or return (None, None) if no default user is found
def extract_default(users, default_name=None, default_config=None):
if not users:
- users = {}
+ return (default_name, default_config)
def safe_find(entry):
config = entry[1]
if not config or 'default' not in config:
return False
- else:
- return config['default']
+ return config['default']
- tmp_users = users.items()
- tmp_users = dict(filter(safe_find, tmp_users))
+ tmp_users = dict(filter(safe_find, users.items()))
if not tmp_users:
return (default_name, default_config)
- else:
- name = list(tmp_users)[0]
- config = tmp_users[name]
- config.pop('default', None)
- return (name, config)
+
+ name = list(tmp_users)[0]
+ config = tmp_users[name]
+ config.pop('default', None)
+ return (name, config)
# vi: ts=4 expandtab
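Expected shapes for ``_normalize_groups()`` after this cleanup (names illustrative; assumes ``util.uniq_merge_sorted`` splits comma strings as elsewhere in cloud-init)::

    _normalize_groups('adm,sudo')
    # -> {'adm': [], 'sudo': []}

    _normalize_groups(['adm', {'sudo': ['alice', 'bob']}])
    # -> {'adm': [], 'sudo': ['alice', 'bob']}

    _normalize_groups({'sudo': 'alice'})
    # -> {'sudo': ['alice']}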
diff --git a/cloudinit/dmi.py b/cloudinit/dmi.py
index f0e69a5a..bba3daf2 100644
--- a/cloudinit/dmi.py
+++ b/cloudinit/dmi.py
@@ -156,8 +156,8 @@ def read_dmi_data(key):
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
- LOG.warning("did not find either path %s or dmidecode command",
- DMI_SYS_PATH)
+ LOG.debug("did not find either path %s or dmidecode command",
+ DMI_SYS_PATH)
return None
# vi: ts=4 expandtab
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 3780326c..07d682d2 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -14,6 +14,9 @@ import time
LOG = logging.getLogger(__name__)
+GPG_LIST = ['gpg', '--with-fingerprint', '--no-default-keyring', '--list-keys',
+ '--keyring']
+
def export_armour(key):
"""Export gpg key, armoured key gets returned"""
@@ -27,6 +30,33 @@ def export_armour(key):
return armour
+def dearmor(key):
+ """Dearmor gpg key, dearmored key gets returned
+
+ note: man gpg(1) makes no mention of an --armour spelling, only --armor
+ """
+ return subp.subp(["gpg", "--dearmor"], data=key, decode=False)[0]
+
+
+def list(key_file, human_output=False):
+ """List keys from a keyring with fingerprints. Default to a stable machine
+    """List keys from a keyring with fingerprints. Defaults to a stable,
+    machine-parseable format.
+ @param key_file: a string containing a filepath to a key
+ @param human_output: return output intended for human parsing
+ """
+ cmd = []
+ cmd.extend(GPG_LIST)
+ if not human_output:
+ cmd.append('--with-colons')
+
+ cmd.append(key_file)
+ (stdout, stderr) = subp.subp(cmd, capture=True)
+ if stderr:
+        LOG.warning('Failed to list keys in keyring "%s": %s',
+                    key_file, stderr)
+ return stdout
+
+
def recv_key(key, keyserver, retries=(1, 1)):
"""Receive gpg key from the specified keyserver.
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 017c50c5..7558745f 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -11,6 +11,7 @@ import ipaddress
import logging
import os
import re
+from typing import Any, Dict
from cloudinit import subp
from cloudinit import util
@@ -971,18 +972,33 @@ def get_ib_hwaddrs_by_interface():
return ret
-def has_url_connectivity(url):
- """Return true when the instance has access to the provided URL
+def has_url_connectivity(url_data: Dict[str, Any]) -> bool:
+ """Return true when the instance has access to the provided URL.
Logs a warning if url is not the expected format.
+
+ url_data is a dictionary of kwargs to send to readurl. E.g.:
+
+ has_url_connectivity({
+ "url": "http://example.invalid",
+ "headers": {"some": "header"},
+ "timeout": 10
+ })
"""
+ if 'url' not in url_data:
+ LOG.warning(
+ "Ignoring connectivity check. No 'url' to check in %s", url_data)
+ return False
+ url = url_data['url']
if not any([url.startswith('http://'), url.startswith('https://')]):
LOG.warning(
"Ignoring connectivity check. Expected URL beginning with http*://"
" received '%s'", url)
return False
+ if 'timeout' not in url_data:
+ url_data['timeout'] = 5
try:
- readurl(url, timeout=5)
+ readurl(**url_data)
except UrlError:
return False
return True
@@ -1025,14 +1041,15 @@ class EphemeralIPv4Network(object):
No operations are performed if the provided interface already has the
specified configuration.
- This can be verified with the connectivity_url.
+ This can be verified with the connectivity_url_data.
If unconnected, bring up the interface with valid ip, prefix and broadcast.
If router is provided setup a default route for that interface. Upon
context exit, clean up the interface leaving no configuration behind.
"""
def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None,
- connectivity_url=None, static_routes=None):
+ connectivity_url_data: Dict[str, Any] = None,
+ static_routes=None):
"""Setup context manager and validate call signature.
@param interface: Name of the network interface to bring up.
@@ -1041,7 +1058,7 @@ class EphemeralIPv4Network(object):
prefix.
@param broadcast: Broadcast address for the IPv4 network.
@param router: Optionally the default gateway IP.
- @param connectivity_url: Optionally, a URL to verify if a usable
+ @param connectivity_url_data: Optionally, a URL to verify if a usable
connection already exists.
@param static_routes: Optionally a list of static routes from DHCP
"""
@@ -1056,7 +1073,7 @@ class EphemeralIPv4Network(object):
'Cannot setup network: {0}'.format(e)
) from e
- self.connectivity_url = connectivity_url
+ self.connectivity_url_data = connectivity_url_data
self.interface = interface
self.ip = ip
self.broadcast = broadcast
@@ -1066,11 +1083,11 @@ class EphemeralIPv4Network(object):
def __enter__(self):
"""Perform ephemeral network setup if interface is not connected."""
- if self.connectivity_url:
- if has_url_connectivity(self.connectivity_url):
+ if self.connectivity_url_data:
+ if has_url_connectivity(self.connectivity_url_data):
LOG.debug(
'Skip ephemeral network setup, instance has connectivity'
- ' to %s', self.connectivity_url)
+ ' to %s', self.connectivity_url_data['url'])
return
self._bringup_device()
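A sketch of the new kwargs-dict form (URL and header values are illustrative; the shape matches the docstring above and is what ``EphemeralDHCPv4`` forwards below)::

    from cloudinit.net import has_url_connectivity

    url_data = {'url': 'http://169.254.169.254/metadata',
                'headers': {'Metadata': 'true'},
                'timeout': 2}
    if has_url_connectivity(url_data):
        pass  # skip ephemeral network setup; connectivity already exists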
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 9b94c9a0..3f4b0418 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -4,6 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+from typing import Dict, Any
import configobj
import logging
import os
@@ -38,21 +39,26 @@ class NoDHCPLeaseError(Exception):
class EphemeralDHCPv4(object):
- def __init__(self, iface=None, connectivity_url=None, dhcp_log_func=None):
+ def __init__(
+ self,
+ iface=None,
+ connectivity_url_data: Dict[str, Any] = None,
+ dhcp_log_func=None
+ ):
self.iface = iface
self._ephipv4 = None
self.lease = None
self.dhcp_log_func = dhcp_log_func
- self.connectivity_url = connectivity_url
+ self.connectivity_url_data = connectivity_url_data
def __enter__(self):
"""Setup sandboxed dhcp context, unless connectivity_url can already be
reached."""
- if self.connectivity_url:
- if has_url_connectivity(self.connectivity_url):
+ if self.connectivity_url_data:
+ if has_url_connectivity(self.connectivity_url_data):
LOG.debug(
'Skip ephemeral DHCP setup, instance has connectivity'
- ' to %s', self.connectivity_url)
+ ' to %s', self.connectivity_url_data)
return
return self.obtain_lease()
@@ -104,8 +110,8 @@ class EphemeralDHCPv4(object):
if kwargs['static_routes']:
kwargs['static_routes'] = (
parse_static_routes(kwargs['static_routes']))
- if self.connectivity_url:
- kwargs['connectivity_url'] = self.connectivity_url
+ if self.connectivity_url_data:
+ kwargs['connectivity_url_data'] = self.connectivity_url_data
ephipv4 = EphemeralIPv4Network(**kwargs)
ephipv4.__enter__()
self._ephipv4 = ephipv4
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 95b064f0..4862bf91 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -710,6 +710,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def _v2_common(self, cfg):
LOG.debug('v2_common: handling config:\n%s', cfg)
for iface, dev_cfg in cfg.items():
+ if 'set-name' in dev_cfg:
+ set_name_iface = dev_cfg.get('set-name')
+ if set_name_iface:
+ iface = set_name_iface
if 'nameservers' in dev_cfg:
search = dev_cfg.get('nameservers').get('search', [])
dns = dev_cfg.get('nameservers').get('addresses', [])
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
index a311572f..ee6fd2ad 100644
--- a/cloudinit/net/networkd.py
+++ b/cloudinit/net/networkd.py
@@ -160,6 +160,10 @@ class Renderer(renderer.Renderer):
cfg.update_section(sec, 'DHCP', dhcp)
+ if (dhcp in ['ipv6', 'yes'] and
+ isinstance(iface.get('accept-ra', ''), bool)):
+ cfg.update_section(sec, 'IPv6AcceptRA', iface['accept-ra'])
+
# This is to accommodate extra keys present in VMware config
def dhcp_domain(self, d, cfg):
for item in ['dhcp4domain', 'dhcp6domain']:
@@ -247,7 +251,7 @@ class Renderer(renderer.Renderer):
def available(target=None):
expected = ['ip', 'systemctl']
- search = ['/usr/bin', '/bin']
+ search = ['/usr/sbin', '/bin']
for p in expected:
if not subp.which(p, search=search, target=target):
return False
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 27447bc2..54a83b51 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -8,8 +8,8 @@
import abc
import io
-from .network_state import parse_net_config_data
-from .udev import generate_udev_rule
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.net.udev import generate_udev_rule
def filter_by_type(match_type):
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 06f7255e..ef4543b4 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -18,8 +18,8 @@ from .network_state import (
is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES)
LOG = logging.getLogger(__name__)
-KNOWN_DISTROS = ['almalinux', 'centos', 'eurolinux', 'fedora', 'rhel', 'rocky',
- 'suse', 'virtuozzo']
+KNOWN_DISTROS = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora',
+ 'openEuler', 'rhel', 'rocky', 'suse', 'virtuozzo']
NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 5ae048e2..28b4ecf7 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -617,7 +617,9 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
url = 'http://example.org/index.html'
httpretty.register_uri(httpretty.GET, url)
- with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease:
+ with net.dhcp.EphemeralDHCPv4(
+ connectivity_url_data={'url': url},
+ ) as lease:
self.assertIsNone(lease)
# Ensure that no teardown happens:
m_dhcp.assert_not_called()
@@ -635,7 +637,9 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
m_subp.return_value = ('', '')
httpretty.register_uri(httpretty.GET, url, body={}, status=404)
- with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease:
+ with net.dhcp.EphemeralDHCPv4(
+ connectivity_url_data={'url': url},
+ ) as lease:
self.assertEqual(fake_lease, lease)
# Ensure that dhcp discovery occurs
m_dhcp.called_once_with()
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index ad9c90ff..f9102f7b 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -622,11 +622,14 @@ class TestEphemeralIPV4Network(CiTestCase):
params = {
'interface': 'eth0', 'ip': '192.168.2.2',
'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
- 'connectivity_url': 'http://example.org/index.html'}
+ 'connectivity_url_data': {'url': 'http://example.org/index.html'}
+ }
with net.EphemeralIPv4Network(**params):
- self.assertEqual([mock.call('http://example.org/index.html',
- timeout=5)], m_readurl.call_args_list)
+ self.assertEqual(
+ [mock.call(url='http://example.org/index.html', timeout=5)],
+ m_readurl.call_args_list
+ )
# Ensure that no teardown happens:
m_subp.assert_has_calls([])
@@ -850,25 +853,28 @@ class TestHasURLConnectivity(HttprettyTestCase):
def test_url_timeout_on_connectivity_check(self, m_readurl):
"""A timeout of 5 seconds is provided when reading a url."""
self.assertTrue(
- net.has_url_connectivity(self.url), 'Expected True on url connect')
+ net.has_url_connectivity({'url': self.url}),
+ 'Expected True on url connect')
def test_true_on_url_connectivity_success(self):
httpretty.register_uri(httpretty.GET, self.url)
self.assertTrue(
- net.has_url_connectivity(self.url), 'Expected True on url connect')
+ net.has_url_connectivity({'url': self.url}),
+ 'Expected True on url connect')
@mock.patch('requests.Session.request')
def test_true_on_url_connectivity_timeout(self, m_request):
"""A timeout raised accessing the url will return False."""
m_request.side_effect = requests.Timeout('Fake Connection Timeout')
self.assertFalse(
- net.has_url_connectivity(self.url),
+ net.has_url_connectivity({'url': self.url}),
'Expected False on url timeout')
def test_true_on_url_connectivity_failure(self):
httpretty.register_uri(httpretty.GET, self.url, body={}, status=404)
self.assertFalse(
- net.has_url_connectivity(self.url), 'Expected False on url fail')
+ net.has_url_connectivity({'url': self.url}),
+ 'Expected False on url fail')
def _mk_v1_phys(mac, name, driver, device_id):
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
index 84e8308a..45e99171 100644
--- a/cloudinit/net/tests/test_network_state.py
+++ b/cloudinit/net/tests/test_network_state.py
@@ -52,6 +52,7 @@ network:
eth1:
match:
macaddress: '66:77:88:99:00:11'
+ set-name: "ens92"
nameservers:
search: [foo.local, bar.local]
addresses: [4.4.4.4]
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 0a8c7af3..e32739ef 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -137,8 +137,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
self._event_types = event_types
self.q = queue.Queue()
self.incarnation_no = self._get_incarnation_no()
- self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
- self.incarnation_no)
+ self.event_key_prefix = "{0}|{1}".format(self.EVENT_PREFIX,
+ self.incarnation_no)
self.publish_thread = threading.Thread(
target=self._publish_event_routine
)
@@ -200,9 +200,9 @@ class HyperVKvpReportingHandler(ReportingHandler):
CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<uuid>
[|subevent_index]
"""
- return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix,
- event.event_type, event.name,
- uuid.uuid4())
+ return "{0}|{1}|{2}|{3}".format(self.event_key_prefix,
+ event.event_type, event.name,
+ uuid.uuid4())
def _encode_kvp_item(self, key, value):
data = struct.pack(
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
index d6f5f95b..b95df27d 100644
--- a/cloudinit/safeyaml.py
+++ b/cloudinit/safeyaml.py
@@ -15,7 +15,7 @@ class _CustomSafeLoader(yaml.SafeLoader):
_CustomSafeLoader.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
+ 'tag:yaml.org,2002:python/unicode',
_CustomSafeLoader.construct_python_unicode)
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index f69005ea..43c8fa24 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -21,6 +21,7 @@ CFG_BUILTIN = {
'datasource_list': [
'NoCloud',
'ConfigDrive',
+ 'LXD',
'OpenNebula',
'DigitalOcean',
'Azure',
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index fddfe363..93493fa0 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -84,8 +84,8 @@ DEFAULT_PROVISIONING_ISO_DEV = '/dev/sr0'
IMDS_TIMEOUT_IN_SECONDS = 2
IMDS_URL = "http://169.254.169.254/metadata"
IMDS_VER_MIN = "2019-06-01"
-IMDS_VER_WANT = "2021-01-01"
-
+IMDS_VER_WANT = "2021-08-01"
+IMDS_EXTENDED_VER_MIN = "2021-03-01"
# This holds SSH key data including if the source was
# from IMDS, as well as the SSH key data itself.
@@ -93,7 +93,7 @@ SSHKeys = namedtuple("SSHKeys", ("keys_from_imds", "ssh_keys"))
class metadata_type(Enum):
- compute = "{}/instance".format(IMDS_URL)
+ all = "{}/instance".format(IMDS_URL)
network = "{}/instance/network".format(IMDS_URL)
reprovisiondata = "{}/reprovisiondata".format(IMDS_URL)
@@ -339,13 +339,10 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
- # Regenerate network config new_instance boot and every boot
default_update_events = {EventScope.NETWORK: {
EventType.BOOT_NEW_INSTANCE,
EventType.BOOT,
- EventType.BOOT_LEGACY
}}
-
_negotiated = False
_metadata_imds = sources.UNSET
_ci_pkl_version = 1
@@ -366,7 +363,9 @@ class DataSourceAzure(sources.DataSource):
def _unpickle(self, ci_pkl_version: int) -> None:
super()._unpickle(ci_pkl_version)
- if "iso_dev" not in self.__dict__:
+ if not hasattr(self, "failed_desired_api_version"):
+ self.failed_desired_api_version = False
+ if not hasattr(self, "iso_dev"):
self.iso_dev = None
def __str__(self):
@@ -495,10 +494,26 @@ class DataSourceAzure(sources.DataSource):
"Found provisioning metadata in %s" % metadata_source,
logger_func=LOG.debug)
- perform_reprovision = reprovision or self._should_reprovision(ret)
+ imds_md = self.get_imds_data_with_api_fallback(
+ self.fallback_interface,
+ retries=10
+ )
+
+ # reset _fallback_interface so that if the code enters reprovisioning
+ # flow, it will force re-evaluation of new fallback nic.
+ self._fallback_interface = None
+
+ if not imds_md and not ovf_is_accessible:
+ msg = 'No OVF or IMDS available'
+ report_diagnostic_event(msg)
+ raise sources.InvalidMetaDataException(msg)
+
+ perform_reprovision = (
+ reprovision or
+ self._should_reprovision(ret, imds_md))
perform_reprovision_after_nic_attach = (
reprovision_after_nic_attach or
- self._should_reprovision_after_nic_attach(ret))
+ self._should_reprovision_after_nic_attach(ret, imds_md))
if perform_reprovision or perform_reprovision_after_nic_attach:
if util.is_FreeBSD():
@@ -508,15 +523,12 @@ class DataSourceAzure(sources.DataSource):
if perform_reprovision_after_nic_attach:
self._wait_for_all_nics_ready()
ret = self._reprovision()
+ # fetch metadata again as it has changed after reprovisioning
+ imds_md = self.get_imds_data_with_api_fallback(
+ self.fallback_interface,
+ retries=10
+ )
- imds_md = self.get_imds_data_with_api_fallback(
- self.fallback_interface,
- retries=10
- )
- if not imds_md and not ovf_is_accessible:
- msg = 'No OVF or IMDS available'
- report_diagnostic_event(msg)
- raise sources.InvalidMetaDataException(msg)
(md, userdata_raw, cfg, files) = ret
self.seed = metadata_source
crawled_data.update({
@@ -692,7 +704,7 @@ class DataSourceAzure(sources.DataSource):
self,
fallback_nic,
retries,
- md_type=metadata_type.compute,
+ md_type=metadata_type.all,
exc_cb=retry_on_url_exc,
infinite=False):
"""
@@ -1317,6 +1329,10 @@ class DataSourceAzure(sources.DataSource):
except UrlError:
# Teardown our EphemeralDHCPv4 context on failure as we retry
self._ephemeral_dhcp_ctx.clean_network()
+
+ # Also reset this flag which determines if we should do dhcp
+ # during retries.
+ is_ephemeral_ctx_present = False
finally:
if nl_sock:
nl_sock.close()
@@ -1404,7 +1420,17 @@ class DataSourceAzure(sources.DataSource):
"connectivity issues: %s" % e, logger_func=LOG.warning)
return False
- def _should_reprovision_after_nic_attach(self, candidate_metadata) -> bool:
+ def _ppstype_from_imds(self, imds_md: dict = None) -> str:
+ try:
+ return imds_md['extended']['compute']['ppsType']
+ except Exception as e:
+ report_diagnostic_event(
+ "Could not retrieve pps configuration from IMDS: %s" %
+ e, logger_func=LOG.debug)
+ return None
+
+ def _should_reprovision_after_nic_attach(
+ self, ovf_md, imds_md=None) -> bool:
"""Whether or not we should wait for nic attach and then poll
IMDS for reprovisioning data. Also sets a marker file to poll IMDS.
@@ -1416,14 +1442,16 @@ class DataSourceAzure(sources.DataSource):
the ISO, thus cloud-init needs to have a way of knowing that it should
jump back into the waiting mode in order to retrieve the ovf_env.
- @param candidate_metadata: Metadata obtained from reading ovf-env.
+ @param ovf_md: Metadata obtained from reading ovf-env.
+ @param imds_md: Metadata obtained from IMDS
@return: Whether to reprovision after waiting for nics to be attached.
"""
- if not candidate_metadata:
+ if not ovf_md:
return False
- (_md, _userdata_raw, cfg, _files) = candidate_metadata
+ (_md, _userdata_raw, cfg, _files) = ovf_md
path = REPROVISION_NIC_ATTACH_MARKER_FILE
if (cfg.get('PreprovisionedVMType', None) == "Savable" or
+ self._ppstype_from_imds(imds_md) == "Savable" or
os.path.isfile(path)):
if not os.path.isfile(path):
LOG.info("Creating a marker file to wait for nic attach: %s",
@@ -1433,7 +1461,7 @@ class DataSourceAzure(sources.DataSource):
return True
return False
- def _should_reprovision(self, ret):
+ def _should_reprovision(self, ovf_md, imds_md=None):
"""Whether or not we should poll IMDS for reprovisioning data.
Also sets a marker file to poll IMDS.
@@ -1444,12 +1472,13 @@ class DataSourceAzure(sources.DataSource):
However, since the VM reports ready to the Fabric, we will not attach
the ISO, thus cloud-init needs to have a way of knowing that it should
jump back into the polling loop in order to retrieve the ovf_env."""
- if not ret:
+ if not ovf_md:
return False
- (_md, _userdata_raw, cfg, _files) = ret
+ (_md, _userdata_raw, cfg, _files) = ovf_md
path = REPROVISION_MARKER_FILE
if (cfg.get('PreprovisionedVm') is True or
- cfg.get('PreprovisionedVMType', None) == 'Running' or
+ cfg.get('PreprovisionedVMType', None) == 'Running' or
+ self._ppstype_from_imds(imds_md) == "Running" or
os.path.isfile(path)):
if not os.path.isfile(path):
LOG.info("Creating a marker file to poll imds: %s",
@@ -2236,7 +2265,7 @@ def _generate_network_config_from_fallback_config() -> dict:
@azure_ds_telemetry_reporter
def get_metadata_from_imds(fallback_nic,
retries,
- md_type=metadata_type.compute,
+ md_type=metadata_type.all,
api_version=IMDS_VER_MIN,
exc_cb=retry_on_url_exc,
infinite=False):
@@ -2277,11 +2306,16 @@ def get_metadata_from_imds(fallback_nic,
def _get_metadata_from_imds(
retries,
exc_cb,
- md_type=metadata_type.compute,
+ md_type=metadata_type.all,
api_version=IMDS_VER_MIN,
infinite=False):
url = "{}?api-version={}".format(md_type.value, api_version)
headers = {"Metadata": "true"}
+
+ # support for extended metadata begins with 2021-03-01
+ if api_version >= IMDS_EXTENDED_VER_MIN and md_type == metadata_type.all:
+ url = url + "&extended=true"
+
try:
response = readurl(
url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
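The extended-metadata gate above relies on IMDS api-versions being ISO-formatted dates, so plain string comparison orders them correctly. A minimal standalone sketch of the resulting URL construction (build_imds_url is an illustrative name, not part of the module):

IMDS_URL = "http://169.254.169.254/metadata"
IMDS_EXTENDED_VER_MIN = "2021-03-01"

def build_imds_url(route, api_version, want_all=True):
    # ISO-date version strings sort lexicographically, so ">=" is safe.
    url = "{}?api-version={}".format(route, api_version)
    if want_all and api_version >= IMDS_EXTENDED_VER_MIN:
        url += "&extended=true"
    return url

# -> http://169.254.169.254/metadata/instance?api-version=2021-08-01&extended=true
print(build_imds_url(IMDS_URL + "/instance", "2021-08-01"))
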
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 54810439..8cb0d5a7 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -160,7 +160,7 @@ class DataSourceCloudStack(sources.DataSource):
def get_data_server():
# Returns the metadata server from DNS
try:
- addrinfo = getaddrinfo("data-server.", 80)
+ addrinfo = getaddrinfo("data-server", 80)
except gaierror:
LOG.debug("DNS Entry data-server not found")
return None
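The one-character CloudStack fix matters because a trailing dot marks the name as fully qualified, which bypasses the resolv.conf search domains the lookup depends on. A standalone sketch mirroring get_data_server (helper name illustrative):

from socket import getaddrinfo, gaierror

def data_server_ip():
    try:
        # Unqualified name: the resolver may append search domains.
        addrinfo = getaddrinfo("data-server", 80)
    except gaierror:
        return None
    return addrinfo[0][4][0]  # first resolved address
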
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 746caddb..9f838bd4 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -27,8 +27,10 @@ HEADERS = {'Metadata-Flavor': 'Google'}
class GoogleMetadataFetcher(object):
- def __init__(self, metadata_address):
+ def __init__(self, metadata_address, num_retries, sec_between_retries):
self.metadata_address = metadata_address
+ self.num_retries = num_retries
+ self.sec_between_retries = sec_between_retries
def get_value(self, path, is_text, is_recursive=False):
value = None
@@ -36,7 +38,9 @@ class GoogleMetadataFetcher(object):
url = self.metadata_address + path
if is_recursive:
url += '/?recursive=True'
- resp = url_helper.readurl(url=url, headers=HEADERS)
+ resp = url_helper.readurl(url=url, headers=HEADERS,
+ retries=self.num_retries,
+ sec_between=self.sec_between_retries)
except url_helper.UrlError as exc:
msg = "url %s raised exception %s"
LOG.debug(msg, path, exc)
@@ -68,9 +72,11 @@ class DataSourceGCE(sources.DataSource):
self.metadata_address = self.ds_cfg['metadata_url']
def _get_data(self):
+ url_params = self.get_url_params()
ret = util.log_time(
LOG.debug, 'Crawl of GCE metadata service',
- read_md, kwargs={'address': self.metadata_address})
+ read_md, kwargs={'address': self.metadata_address,
+ 'url_params': url_params})
if not ret['success']:
if ret['platform_reports_gce']:
@@ -176,7 +182,7 @@ def _parse_public_keys(public_keys_data, default_user=None):
return public_keys
-def read_md(address=None, platform_check=True):
+def read_md(address=None, url_params=None, platform_check=True):
if address is None:
address = MD_V1_URL
@@ -203,8 +209,9 @@ def read_md(address=None, platform_check=True):
('instance-data', ('instance/attributes',), False, False, True),
('project-data', ('project/attributes',), False, False, True),
]
-
- metadata_fetcher = GoogleMetadataFetcher(address)
+ metadata_fetcher = GoogleMetadataFetcher(address,
+ url_params.num_retries,
+ url_params.sec_between_retries)
md = {}
# Iterate over url_map keys to get metadata items.
for (mkey, paths, required, is_text, is_recursive) in url_map:
@@ -250,7 +257,7 @@ def read_md(address=None, platform_check=True):
def platform_reports_gce():
pname = dmi.read_dmi_data('system-product-name') or "N/A"
- if pname == "Google Compute Engine":
+ if pname == "Google Compute Engine" or pname == "Google":
return True
# system-product-name is not always guaranteed (LP: #1674861)
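The GCE change is plumbing: the datasource's URL parameters now reach every metadata GET. A rough standalone equivalent of that retry loop, using requests directly instead of cloud-init's url_helper (names illustrative):

import time
import requests

HEADERS = {"Metadata-Flavor": "Google"}

def fetch_with_retries(url, num_retries=5, sec_between=1, timeout=10):
    # Attempt the GET up to num_retries + 1 times, sleeping between tries.
    for attempt in range(num_retries + 1):
        try:
            resp = requests.get(url, headers=HEADERS, timeout=timeout)
            if resp.ok:
                return resp.text
        except requests.RequestException:
            pass
        if attempt < num_retries:
            time.sleep(sec_between)
    return None
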
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
new file mode 100644
index 00000000..732b32ff
--- /dev/null
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -0,0 +1,358 @@
+
+"""Datasource for LXD, reads /dev/lxd/sock representaton of instance data.
+
+Notes:
+ * This datasource replaces previous NoCloud datasource for LXD.
+ * Older LXD images may not have updates for cloud-init so NoCloud may
+ still be detected on those images.
+ * Detect LXD datasource when /dev/lxd/sock is an active socket file.
+ * Info on dev-lxd API: https://linuxcontainers.org/lxd/docs/master/dev-lxd
+ * TODO: Hotplug support using the websockets API 1.0/events
+"""
+
+import os
+
+import requests
+from requests.adapters import HTTPAdapter
+
+# pylint fails to import the two modules below.
+# These are imported via requests.packages rather than urllib3 because:
+# a.) the provider of the requests package should ensure that urllib3
+# contained in it is consistent/correct.
+# b.) cloud-init does not specifically have a dependency on urllib3
+#
+# For future reference, see:
+# https://github.com/kennethreitz/requests/pull/2375
+# https://github.com/requests/requests/issues/4104
+# pylint: disable=E0401
+from requests.packages.urllib3.connection import HTTPConnection
+from requests.packages.urllib3.connectionpool import HTTPConnectionPool
+
+import socket
+import stat
+
+from cloudinit import log as logging
+from cloudinit import sources, subp, util
+
+LOG = logging.getLogger(__name__)
+
+LXD_SOCKET_PATH = "/dev/lxd/sock"
+LXD_SOCKET_API_VERSION = "1.0"
+
+# Config key mappings to alias as top-level instance data keys
+CONFIG_KEY_ALIASES = {
+ "user.user-data": "user-data",
+ "user.network-config": "network-config",
+ "user.network_mode": "network_mode",
+ "user.vendor-data": "vendor-data"
+}
+
+
+def generate_fallback_network_config(network_mode: str = "") -> dict:
+ """Return network config V1 dict representing instance network config."""
+ network_v1 = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical", "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}]
+ }
+ ]
+ }
+ if subp.which("systemd-detect-virt"):
+ try:
+ virt_type, _ = subp.subp(['systemd-detect-virt'])
+ except subp.ProcessExecutionError as err:
+ LOG.warning(
+ "Unable to run systemd-detect-virt: %s."
+ " Rendering default network config.", err
+ )
+ return network_v1
+ if virt_type.strip() == "kvm": # instance.type VIRTUAL-MACHINE
+ arch = util.system_info()["uname"][4]
+ if arch == "ppc64le":
+ network_v1["config"][0]["name"] = "enp0s5"
+ elif arch == "s390x":
+ network_v1["config"][0]["name"] = "enc9"
+ else:
+ network_v1["config"][0]["name"] = "enp5s0"
+ if network_mode == "link-local":
+ network_v1["config"][0]["subnets"][0]["control"] = "manual"
+ elif network_mode not in ("", "dhcp"):
+ LOG.warning(
+ "Ignoring unexpected value user.network_mode: %s", network_mode
+ )
+ return network_v1
+
+
+class SocketHTTPConnection(HTTPConnection):
+ def __init__(self, socket_path):
+ super().__init__('localhost')
+ self.socket_path = socket_path
+
+ def connect(self):
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.connect(self.socket_path)
+
+
+class SocketConnectionPool(HTTPConnectionPool):
+ def __init__(self, socket_path):
+ self.socket_path = socket_path
+ super().__init__('localhost')
+
+ def _new_conn(self):
+ return SocketHTTPConnection(self.socket_path)
+
+
+class LXDSocketAdapter(HTTPAdapter):
+ def get_connection(self, url, proxies=None):
+ return SocketConnectionPool(LXD_SOCKET_PATH)
+
+
+def _maybe_remove_top_network(cfg):
+ """If network-config contains top level 'network' key, then remove it.
+
+ Some providers of network configuration may provide a top level
+ 'network' key (LP: #1798117) even though it is not necessary.
+
+ Be friendly and remove it when that is clearly the case.
+
+ Return the original value if no change or the updated value if changed."""
+ if "network" not in cfg:
+ return cfg
+ network_val = cfg["network"]
+ bmsg = 'Top level network key in network-config %s: %s'
+ if not isinstance(network_val, dict):
+ LOG.debug(bmsg, "was not a dict", cfg)
+ return cfg
+ if len(list(cfg.keys())) != 1:
+ LOG.debug(bmsg, "had multiple top level keys", cfg)
+ return cfg
+ if network_val.get('config') == "disabled":
+ LOG.debug(bmsg, "was config/disabled", cfg)
+ elif not all(('config' in network_val, 'version' in network_val)):
+ LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
+ return cfg
+ LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
+ return network_val
+
+
+def _raw_instance_data_to_dict(metadata_type: str, metadata_value) -> dict:
+ """Convert raw instance data from str, bytes, YAML to dict
+
+ :param metadata_type: string, one of: meta-data, vendor-data, user-data
+ or network-config
+
+ :param metadata_value: str, bytes or dict representing instance-data.
+
+ :raises: InvalidMetaDataException on invalid instance-data content.
+ """
+ if isinstance(metadata_value, dict):
+ return metadata_value
+ if metadata_value is None:
+ return {}
+ try:
+ parsed_metadata = util.load_yaml(metadata_value)
+ except AttributeError as exc: # not str or bytes
+ raise sources.InvalidMetaDataException(
+ "Invalid {md_type}. Expected str, bytes or dict but found:"
+ " {value}".format(md_type=metadata_type, value=metadata_value)
+ ) from exc
+ if parsed_metadata is None:
+ raise sources.InvalidMetaDataException(
+ "Invalid {md_type} format. Expected YAML but found:"
+ " {value}".format(md_type=metadata_type, value=metadata_value)
+ )
+ return parsed_metadata
+
+
+class DataSourceLXD(sources.DataSource):
+
+ dsname = 'LXD'
+
+ _network_config = sources.UNSET
+ _crawled_metadata = sources.UNSET
+
+ sensitive_metadata_keys = (
+ 'merged_cfg', 'user.meta-data', 'user.vendor-data', 'user.user-data',
+ )
+
+ def _is_platform_viable(self) -> bool:
+ """Check platform environment to report if this datasource may run."""
+ return is_platform_viable()
+
+ def _get_data(self) -> bool:
+ """Crawl LXD socket API instance data and return True on success"""
+ if not self._is_platform_viable():
+ LOG.debug("Not an LXD datasource: No LXD socket found.")
+ return False
+
+ self._crawled_metadata = util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=read_metadata)
+ self.metadata = _raw_instance_data_to_dict(
+ "meta-data", self._crawled_metadata.get("meta-data")
+ )
+ if LXD_SOCKET_API_VERSION in self._crawled_metadata:
+ config = self._crawled_metadata[LXD_SOCKET_API_VERSION].get(
+ "config", {}
+ )
+ user_metadata = config.get("user.meta-data", {})
+ if user_metadata:
+ user_metadata = _raw_instance_data_to_dict(
+ "user.meta-data", user_metadata
+ )
+ if not isinstance(self.metadata, dict):
+ self.metadata = util.mergemanydict(
+ [util.load_yaml(self.metadata), user_metadata]
+ )
+ if "user-data" in self._crawled_metadata:
+ self.userdata_raw = self._crawled_metadata["user-data"]
+ if "network-config" in self._crawled_metadata:
+ self._network_config = _maybe_remove_top_network(
+ _raw_instance_data_to_dict(
+ "network-config", self._crawled_metadata["network-config"]
+ )
+ )
+ if "vendor-data" in self._crawled_metadata:
+ self.vendordata_raw = self._crawled_metadata["vendor-data"]
+ return True
+
+ def _get_subplatform(self) -> str:
+ """Return subplatform details for this datasource"""
+ return "LXD socket API v. {ver} ({socket})".format(
+ ver=LXD_SOCKET_API_VERSION, socket=LXD_SOCKET_PATH
+ )
+
+ def check_instance_id(self, sys_cfg) -> str:
+ """Return True if instance_id unchanged."""
+ response = read_metadata(metadata_only=True)
+ md = response.get("meta-data", {})
+ if not isinstance(md, dict):
+ md = util.load_yaml(md)
+ return md.get("instance-id") == self.metadata.get("instance-id")
+
+ @property
+ def network_config(self) -> dict:
+ """Network config read from LXD socket config/user.network-config.
+
+ If none is present, then we generate fallback configuration.
+ """
+ if self._network_config == sources.UNSET:
+ if self._crawled_metadata.get("network-config"):
+ self._network_config = self._crawled_metadata.get(
+ "network-config"
+ )
+ else:
+ network_mode = self._crawled_metadata.get("network_mode", "")
+ self._network_config = generate_fallback_network_config(
+ network_mode
+ )
+ return self._network_config
+
+
+def is_platform_viable() -> bool:
+ """Return True when this platform appears to have an LXD socket."""
+ if os.path.exists(LXD_SOCKET_PATH):
+ return stat.S_ISSOCK(os.lstat(LXD_SOCKET_PATH).st_mode)
+ return False
+
+
+def read_metadata(
+ api_version: str = LXD_SOCKET_API_VERSION, metadata_only: bool = False
+) -> dict:
+ """Fetch metadata from the /dev/lxd/socket routes.
+
+ Perform a number of HTTP GETs on known routes on the devlxd socket API.
+ Minimally all containers must respond to http://lxd/1.0/meta-data when
+ the LXD configuration setting `security.devlxd` is true.
+
+ When `security.devlxd` is false, no /dev/lxd/sock file exists. This
+ datasource will return False from `is_platform_viable` in that case.
+
+ Perform a GET of `<LXD_SOCKET_API_VERSION>/config` and walk all `user.*`
+ configuration keys, storing all keys and values under the dict key
+ {LXD_SOCKET_API_VERSION: {config: {...}}}.
+
+ In the presence of the following optional user config keys,
+ create top level aliases:
+ - user.user-data -> user-data
+ - user.vendor-data -> vendor-data
+ - user.network-config -> network-config
+
+ :return:
+ A dict with the following mandatory key: meta-data.
+ Optional keys: user-data, vendor-data, network-config, network_mode
+
+ Under the <LXD_SOCKET_API_VERSION> key is a dict of all raw
+ configuration keys and values the socket surfaces for the container
+ under the /1.0/config/ route.
+ """
+ md = {}
+ lxd_url = "http://lxd"
+ version_url = lxd_url + "/" + api_version + "/"
+ with requests.Session() as session:
+ session.mount(version_url, LXDSocketAdapter())
+ # Raw meta-data as text
+ md_route = "{route}/meta-data".format(route=version_url)
+ response = session.get(md_route)
+ LOG.debug("[GET] [HTTP:%d] %s", response.status_code, md_route)
+ if not response.ok:
+ raise sources.InvalidMetaDataException(
+ "Invalid HTTP response [{code}] from {route}: {resp}".format(
+ code=response.status_code,
+ route=md_route,
+ resp=response.text
+ )
+ )
+
+ md["meta-data"] = response.text
+ if metadata_only:
+ return md # Skip network-data, vendor-data, user-data
+
+ config_url = version_url + "config"
+ # Represent all advertised/available config routes under
+ # the dict path {LXD_SOCKET_API_VERSION: {config: {...}}.
+ LOG.debug("[GET] %s", config_url)
+ config_routes = session.get(config_url).json()
+ md[LXD_SOCKET_API_VERSION] = {
+ "config": {},
+ "meta-data": md["meta-data"]
+ }
+ for config_route in config_routes:
+ url = "http://lxd{route}".format(route=config_route)
+ LOG.debug("[GET] %s", url)
+ response = session.get(url)
+ if response.ok:
+ cfg_key = config_route.rpartition("/")[-1]
+ # Leave raw data values/format unchanged to represent it in
+ # instance-data.json for cloud-init query or jinja template
+ # use.
+ md[LXD_SOCKET_API_VERSION]["config"][cfg_key] = response.text
+ # Promote common CONFIG_KEY_ALIASES to top-level keys.
+ if cfg_key in CONFIG_KEY_ALIASES:
+ md[CONFIG_KEY_ALIASES[cfg_key]] = response.text
+ else:
+ LOG.debug("Skipping %s on invalid response", url)
+ return md
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceLXD, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import argparse
+
+ description = """Query LXD metadata and emit a JSON object."""
+ parser = argparse.ArgumentParser(description=description)
+ parser.parse_args()
+ print(util.json_dumps(read_metadata()))
+# vi: ts=4 expandtab
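For experimenting inside an LXD instance (with security.devlxd enabled), the same endpoint can be queried with only the standard library; this sketch does over http.client what SocketHTTPConnection does via requests:

import http.client
import socket

class UnixHTTPConnection(http.client.HTTPConnection):
    # HTTPConnection that dials a Unix socket instead of TCP.
    def __init__(self, socket_path):
        super().__init__("localhost")
        self.socket_path = socket_path

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self.socket_path)

conn = UnixHTTPConnection("/dev/lxd/sock")
conn.request("GET", "/1.0/meta-data")
response = conn.getresponse()
print(response.status, response.read().decode())
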
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index a126aad3..2d9e86b4 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -247,7 +247,7 @@ def _quick_read_instance_id(dirs=None):
try:
data = util.pathprefix2dict(d, required=['meta-data'])
md = util.load_yaml(data['meta-data'])
- if iid_key in md:
+ if md and iid_key in md:
return md[iid_key]
except ValueError:
pass
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index e909f058..5257a534 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -115,7 +115,9 @@ class DataSourceOVF(sources.DataSource):
else:
search_paths = (
"/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
- "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")
+ "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools",
+ "/usr/lib/x86_64-linux-gnu/open-vm-tools",
+ "/usr/lib/aarch64-linux-gnu/open-vm-tools")
plugin = "libdeployPkgPlugin.so"
deployPkgPluginPath = None
@@ -358,8 +360,7 @@ class DataSourceOVF(sources.DataSource):
if contents:
break
if contents:
- read_network = ('com.vmware.guestinfo' == name)
- (md, ud, cfg) = read_ovf_environment(contents, read_network)
+ (md, ud, cfg) = read_ovf_environment(contents, True)
self.environment = contents
if 'network-config' in md and md['network-config']:
self._network_config = md['network-config']
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 730ec586..21603fbd 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -195,7 +195,11 @@ class OpenNebulaNetwork(object):
return self.get_field(dev, "gateway")
def get_gateway6(self, dev):
- return self.get_field(dev, "gateway6")
+ # OpenNebula 6.1.80 introduced new context parameter ETHx_IP6_GATEWAY
+ # to replace old ETHx_GATEWAY6. Old ETHx_GATEWAY6 will be removed in
+ # OpenNebula 6.4.0 (https://github.com/OpenNebula/one/issues/5536).
+ return self.get_field(dev, "ip6_gateway",
+ self.get_field(dev, "gateway6"))
def get_mask(self, dev):
return self.get_field(dev, "mask", "255.255.255.0")
@@ -440,7 +444,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
if k in context:
results['metadata']['local-hostname'] = context[k]
break
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index bf81b10b..fbb5312a 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -40,6 +40,7 @@ METADATA_PATTERN = METADATA_ROOT + "{path}/"
# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
# indicates that an MTU of 9000 is used within OCI
MTU = 9000
+V2_HEADERS = {"Authorization": "Bearer Oracle"}
OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data")
@@ -134,7 +135,13 @@ class DataSourceOracle(sources.DataSource):
)
network_context = noop()
if not _is_iscsi_root():
- network_context = dhcp.EphemeralDHCPv4(net.find_fallback_nic())
+ network_context = dhcp.EphemeralDHCPv4(
+ iface=net.find_fallback_nic(),
+ connectivity_url_data={
+ "url": METADATA_PATTERN.format(version=2, path="instance"),
+ "headers": V2_HEADERS,
+ }
+ )
with network_context:
fetched_metadata = read_opc_metadata(
fetch_vnics_data=fetch_vnics_data
@@ -304,11 +311,9 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
retries = 2
def _fetch(metadata_version: int, path: str) -> dict:
- headers = {
- "Authorization": "Bearer Oracle"} if metadata_version > 1 else None
return readurl(
url=METADATA_PATTERN.format(version=metadata_version, path=path),
- headers=headers,
+ headers=V2_HEADERS if metadata_version > 1 else None,
retries=retries,
)._response.json()
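The connectivity_url_data passed to EphemeralDHCPv4 above lets it probe the v2 metadata endpoint first and skip bringing up a temporary interface when the endpoint is already reachable. Conceptually the pre-check reduces to this sketch (the real check lives in cloud-init's net helpers and uses readurl):

import requests

def already_connected(url, headers=None, timeout=5):
    # True when the endpoint answers, i.e. no ephemeral DHCP is needed.
    try:
        return requests.get(url, headers=headers, timeout=timeout).ok
    except requests.RequestException:
        return False
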
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index c08ff848..68e1ff0b 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -8,6 +8,7 @@
from cloudinit import log as log
from cloudinit import sources
from cloudinit import util
+from cloudinit import version
import cloudinit.sources.helpers.vultr as vultr
@@ -16,7 +17,11 @@ BUILTIN_DS_CONFIG = {
'url': 'http://169.254.169.254',
'retries': 30,
'timeout': 2,
- 'wait': 2
+ 'wait': 2,
+ 'user-agent': 'Cloud-Init/%s - OS: %s Variant: %s' %
+ (version.version_string(),
+ util.system_info()['system'],
+ util.system_info()['variant'])
}
@@ -40,21 +45,18 @@ class DataSourceVultr(sources.DataSource):
LOG.debug("Machine is a Vultr instance")
# Fetch metadata
- md = self.get_metadata()
-
- self.metadata_full = md
- self.metadata['instanceid'] = md['instanceid']
- self.metadata['local-hostname'] = md['hostname']
- self.metadata['public-keys'] = md["public-keys"]
- self.userdata_raw = md["user-data"]
+ self.metadata = self.get_metadata()
+ self.metadata['instance-id'] = self.metadata['instanceid']
+ self.metadata['local-hostname'] = self.metadata['hostname']
+ self.userdata_raw = self.metadata["user-data"]
# Generate config and process data
- self.get_datasource_data(md)
+ self.get_datasource_data(self.metadata)
# Dump some data so diagnosing failures is manageable
LOG.debug("Vultr Vendor Config:")
- LOG.debug(md['vendor-data']['config'])
- LOG.debug("SUBID: %s", self.metadata['instanceid'])
+ LOG.debug(util.json_dumps(self.metadata['vendor-data']))
+ LOG.debug("SUBID: %s", self.metadata['instance-id'])
LOG.debug("Hostname: %s", self.metadata['local-hostname'])
if self.userdata_raw is not None:
LOG.debug("User-Data:")
@@ -64,14 +66,16 @@ class DataSourceVultr(sources.DataSource):
# Process metadata
def get_datasource_data(self, md):
- # Grab config
- config = md['vendor-data']['config']
-
# Generate network config
- self.netcfg = vultr.generate_network_config(md['interfaces'])
+ if "cloud_interfaces" in md:
+ # In the future we will just drop pre-configured
+ # network configs into the array. They need names though.
+ self.netcfg = vultr.add_interface_names(md['cloud_interfaces'])
+ else:
+ self.netcfg = vultr.generate_network_config(md['interfaces'])
- # This requires info generated in the vendor config
- user_scripts = vultr.generate_user_scripts(md, self.netcfg['config'])
+ # Grab vendordata
+ self.vendordata_raw = md['vendor-data']
# Default hostname is "guest" for whitelabel
if self.metadata['local-hostname'] == "":
@@ -81,18 +85,13 @@ class DataSourceVultr(sources.DataSource):
if self.userdata_raw == "":
self.userdata_raw = None
- # Assemble vendor-data
- # This adds provided scripts and the config
- self.vendordata_raw = []
- self.vendordata_raw.extend(user_scripts)
- self.vendordata_raw.append("#cloud-config\n%s" % config)
-
# Get the metadata by flag
def get_metadata(self):
return vultr.get_metadata(self.ds_cfg['url'],
self.ds_cfg['timeout'],
self.ds_cfg['retries'],
- self.ds_cfg['wait'])
+ self.ds_cfg['wait'],
+ self.ds_cfg['user-agent'])
# Compare subid as instance id
def check_instance_id(self, sys_cfg):
@@ -137,11 +136,12 @@ if __name__ == "__main__":
md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'],
BUILTIN_DS_CONFIG['timeout'],
BUILTIN_DS_CONFIG['retries'],
- BUILTIN_DS_CONFIG['wait'])
- config = md['vendor-data']['config']
+ BUILTIN_DS_CONFIG['wait'],
+ BUILTIN_DS_CONFIG['user-agent'])
+ config = md['vendor-data']
sysinfo = vultr.get_sysinfo()
print(util.json_dumps(sysinfo))
- print(config)
+ print(util.json_dumps(config))
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index cc7e1c3c..f2f2343c 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,7 +13,7 @@ import copy
import json
import os
from collections import namedtuple
-from typing import Dict, List
+from typing import Dict, List # noqa: F401
from cloudinit import dmi
from cloudinit import importer
@@ -138,7 +138,8 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
URLParams = namedtuple(
- 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
+ 'URLParms', ['max_wait_seconds', 'timeout_seconds',
+ 'num_retries', 'sec_between_retries'])
class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@@ -175,9 +176,10 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
NetworkConfigSource.ds)
# read_url_params
- url_max_wait = -1 # max_wait < 0 means do not wait
- url_timeout = 10 # timeout for each metadata url read attempt
- url_retries = 5 # number of times to retry url upon 404
+ url_max_wait = -1 # max_wait < 0 means do not wait
+ url_timeout = 10 # timeout for each metadata url read attempt
+ url_retries = 5 # number of times to retry url upon 404
+ url_sec_between_retries = 1 # amount of seconds to wait between retries
# The datasource defines a set of supported EventTypes during which
# the datasource can react to changes in metadata and regenerate
@@ -194,6 +196,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
EventType.BOOT_NEW_INSTANCE,
EventType.BOOT,
EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
}}
default_update_events = {EventScope.NETWORK: {
EventType.BOOT_NEW_INSTANCE,
@@ -422,7 +425,18 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
LOG, "Config retries '%s' is not an int, using default '%s'",
self.ds_cfg.get('retries'), retries)
- return URLParams(max_wait, timeout, retries)
+ sec_between_retries = self.url_sec_between_retries
+ try:
+ sec_between_retries = int(self.ds_cfg.get(
+ "sec_between_retries",
+ self.url_sec_between_retries))
+ except Exception:
+ util.logexc(
+ LOG, "Config sec_between_retries '%s' is not an int,"
+ " using default '%s'",
+ self.ds_cfg.get("sec_between_retries"), sec_between_retries)
+
+ return URLParams(max_wait, timeout, retries, sec_between_retries)
def get_userdata(self, apply_filter=False):
if self.userdata is None:
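Each knob in get_url_params follows the same defaulting pattern: take the datasource config value when it parses as an int, otherwise log the bad value and keep the class default. A standalone sketch of that pattern (helper name illustrative; the real code logs via util.logexc):

import logging

LOG = logging.getLogger(__name__)

def int_or_default(ds_cfg: dict, key: str, default: int) -> int:
    # Bad config values are logged, never fatal.
    try:
        return int(ds_cfg.get(key, default))
    except (TypeError, ValueError):
        LOG.warning("Config %s %r is not an int, using default %r",
                    key, ds_cfg.get(key), default)
        return default

# int_or_default({"sec_between_retries": "x"}, "sec_between_retries", 1) -> 1
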
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index c22cd0b1..55487ac3 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -17,20 +17,17 @@ LOG = log.getLogger(__name__)
@lru_cache()
-def get_metadata(url, timeout, retries, sec_between):
+def get_metadata(url, timeout, retries, sec_between, agent):
# Bring up interface
try:
- with EphemeralDHCPv4(connectivity_url=url):
+ with EphemeralDHCPv4(connectivity_url_data={"url": url}):
# Fetch the metadata
- v1 = read_metadata(url, timeout, retries, sec_between)
+ v1 = read_metadata(url, timeout, retries, sec_between, agent)
except (NoDHCPLeaseError) as exc:
LOG.error("Bailing, DHCP Exception: %s", exc)
raise
- v1_json = json.loads(v1)
- metadata = v1_json
-
- return metadata
+ return json.loads(v1)
# Read the system information from SMBIOS
@@ -64,12 +61,20 @@ def is_vultr():
# Read Metadata endpoint
-def read_metadata(url, timeout, retries, sec_between):
+def read_metadata(url, timeout, retries, sec_between, agent):
url = "%s/v1.json" % url
+
+ # Announce OS details so we can handle non-Vultr origin
+ # images and provide correct vendordata generation.
+ headers = {
+ 'Metadata-Token': 'cloudinit',
+ 'User-Agent': agent
+ }
+
response = url_helper.readurl(url,
timeout=timeout,
retries=retries,
- headers={'Metadata-Token': 'vultr'},
+ headers=headers,
sec_between=sec_between)
if not response.ok():
@@ -114,9 +119,9 @@ def generate_network_config(interfaces):
public = generate_public_network_interface(interfaces[0])
network['config'].append(public)
- # Prepare interface 1, private
- if len(interfaces) > 1:
- private = generate_private_network_interface(interfaces[1])
+ # Prepare additional interfaces, private
+ for i in range(1, len(interfaces)):
+ private = generate_private_network_interface(interfaces[i])
network['config'].append(private)
return network
@@ -141,12 +146,22 @@ def generate_public_network_interface(interface):
"control": "auto"
},
{
- "type": "dhcp6",
+ "type": "ipv6_slaac",
"control": "auto"
},
]
}
+ # Options that may or may not be used
+ if "mtu" in interface:
+ netcfg['mtu'] = interface['mtu']
+
+ if "accept-ra" in interface:
+ netcfg['accept-ra'] = interface['accept-ra']
+
+ if "routes" in interface:
+ netcfg['subnets'][0]['routes'] = interface['routes']
+
# Check for additional IP's
additional_count = len(interface['ipv4']['additional'])
if "ipv4" in interface and additional_count > 0:
@@ -157,6 +172,10 @@ def generate_public_network_interface(interface):
"address": additional['address'],
"netmask": additional['netmask']
}
+
+ if "routes" in additional:
+ add['routes'] = additional['routes']
+
netcfg['subnets'].append(add)
# Check for additional IPv6's
@@ -169,6 +188,10 @@ def generate_public_network_interface(interface):
"address": additional['address'],
"netmask": additional['netmask']
}
+
+ if "routes" in additional:
+ add['routes'] = additional['routes']
+
netcfg['subnets'].append(add)
# Add config to template
@@ -187,7 +210,6 @@ def generate_private_network_interface(interface):
"name": interface_name,
"type": "physical",
"mac_address": interface['mac'],
- "accept-ra": 1,
"subnets": [
{
"type": "static",
@@ -198,45 +220,30 @@ def generate_private_network_interface(interface):
]
}
- return netcfg
-
+ # Options that may or may not be used
+ if "mtu" in interface:
+ netcfg['mtu'] = interface['mtu']
-# This is for the vendor and startup scripts
-def generate_user_scripts(md, network_config):
- user_scripts = []
+ if "accept-ra" in interface:
+ netcfg['accept-ra'] = interface['accept-ra']
- # Raid 1 script
- if md['vendor-data']['raid1-script']:
- user_scripts.append(md['vendor-data']['raid1-script'])
+ if "routes" in interface:
+ netcfg['subnets'][0]['routes'] = interface['routes']
- # Enable multi-queue on linux
- if util.is_Linux() and md['vendor-data']['ethtool-script']:
- ethtool_script = md['vendor-data']['ethtool-script']
-
- # Tool location
- tool = "/opt/vultr/ethtool"
-
- # Go through the interfaces
- for netcfg in network_config:
- # If the interface has a mac and is physical
- if "mac_address" in netcfg and netcfg['type'] == "physical":
- # Set its multi-queue to num of cores as per RHEL Docs
- name = netcfg['name']
- command = "%s -L %s combined $(nproc --all)" % (tool, name)
- ethtool_script = '%s\n%s' % (ethtool_script, command)
-
- user_scripts.append(ethtool_script)
+ return netcfg
- # This is for vendor scripts
- if md['vendor-data']['vendor-script']:
- user_scripts.append(md['vendor-data']['vendor-script'])
- # Startup script
- script = md['startup-script']
- if script and script != "echo No configured startup script":
- user_scripts.append(script)
+# Make required adjustments to the network configs provided
+def add_interface_names(interfaces):
+ for interface in interfaces:
+ interface_name = get_interface_name(interface['mac'])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system" %
+ interface['mac'])
+ interface['name'] = interface_name
- return user_scripts
+ return interfaces
# vi: ts=4 expandtab
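add_interface_names depends on resolving each MAC address to a kernel interface name (get_interface_name in this helper). A standalone sketch of that lookup, reading sysfs directly and assuming the usual /sys/class/net layout:

import os

def interface_name_for_mac(mac: str) -> str:
    # Scan /sys/class/net/*/address for a matching hardware address.
    for name in os.listdir("/sys/class/net"):
        try:
            with open(os.path.join("/sys/class/net", name, "address")) as f:
                if f.read().strip().lower() == mac.lower():
                    return name
        except OSError:
            continue
    raise RuntimeError(
        "Interface: %s could not be found on the system" % mac)
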
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index a2b052a6..ae09cb17 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -97,6 +97,8 @@ class TestDataSource(CiTestCase):
self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
self.assertEqual(params.num_retries, self.datasource.url_retries)
+ self.assertEqual(params.sec_between_retries,
+ self.datasource.url_sec_between_retries)
def test_datasource_get_url_params_subclassed(self):
"""Subclasses can override get_url_params defaults."""
@@ -104,7 +106,7 @@ class TestDataSource(CiTestCase):
distro = 'distrotest' # generally should be a Distro object
datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
expected = (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries)
+ datasource.url_retries, datasource.url_sec_between_retries)
url_params = datasource.get_url_params()
self.assertNotEqual(self.datasource.get_url_params(), url_params)
self.assertEqual(expected, url_params)
@@ -114,14 +116,16 @@ class TestDataSource(CiTestCase):
sys_cfg = {
'datasource': {
'MyTestSubclass': {
- 'max_wait': '1', 'timeout': '2', 'retries': '3'}}}
+ 'max_wait': '1', 'timeout': '2',
+ 'retries': '3', 'sec_between_retries': 4
+ }}}
datasource = DataSourceTestSubclassNet(
sys_cfg, self.distro, self.paths)
- expected = (1, 2, 3)
+ expected = (1, 2, 3, 4)
url_params = datasource.get_url_params()
self.assertNotEqual(
(datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries),
+ datasource.url_retries, datasource.url_sec_between_retries),
url_params)
self.assertEqual(expected, url_params)
@@ -130,7 +134,8 @@ class TestDataSource(CiTestCase):
# Set an override that is below 0 which gets ignored.
sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}}
datasource = DataSource(sys_cfg, self.distro, self.paths)
- (_max_wait, timeout, _retries) = datasource.get_url_params()
+ (_max_wait, timeout, _retries,
+ _sec_between_retries) = datasource.get_url_params()
self.assertEqual(0, timeout)
def test_datasource_get_url_uses_defaults_on_errors(self):
@@ -142,7 +147,7 @@ class TestDataSource(CiTestCase):
datasource = DataSource(sys_cfg, self.distro, self.paths)
url_params = datasource.get_url_params()
expected = (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries)
+ datasource.url_retries, datasource.url_sec_between_retries)
self.assertEqual(expected, url_params)
logs = self.logs.getvalue()
expected_logs = [
diff --git a/cloudinit/sources/tests/test_lxd.py b/cloudinit/sources/tests/test_lxd.py
new file mode 100644
index 00000000..c2027616
--- /dev/null
+++ b/cloudinit/sources/tests/test_lxd.py
@@ -0,0 +1,185 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from collections import namedtuple
+from copy import deepcopy
+import stat
+from unittest import mock
+import yaml
+
+import pytest
+
+from cloudinit.sources import DataSourceLXD as lxd, UNSET
+DS_PATH = "cloudinit.sources.DataSourceLXD."
+
+
+LStatResponse = namedtuple("lstatresponse", "st_mode")
+
+
+NETWORK_V1 = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical", "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}]
+ }
+ ]
+}
+NETWORK_V1_MANUAL = deepcopy(NETWORK_V1)
+NETWORK_V1_MANUAL["config"][0]["subnets"][0]["control"] = "manual"
+
+
+def _add_network_v1_device(devname) -> dict:
+ """Helper to inject device name into default network v1 config."""
+ network_cfg = deepcopy(NETWORK_V1)
+ network_cfg["config"][0]["name"] = devname
+ return network_cfg
+
+
+LXD_V1_METADATA = {
+ "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "network-config": NETWORK_V1,
+ "user-data": "#cloud-config\npackages: [sl]\n",
+ "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "1.0": {
+ "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "config": {
+ "user.user-data":
+ "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "user.vendor-data":
+ "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "user.network-config": yaml.safe_dump(NETWORK_V1),
+ }
+ }
+}
+
+
+@pytest.fixture
+def lxd_metadata():
+ return LXD_V1_METADATA
+
+
+@pytest.yield_fixture
+def lxd_ds(request, paths, lxd_metadata):
+ """
+ Return an instantiated DataSourceLXD.
+
+ This also performs the mocking required for the default test case:
+ * ``is_platform_viable`` returns True,
+ * ``read_metadata`` returns ``LXD_V1_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object)
+ """
+ with mock.patch(DS_PATH + "is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata):
+ yield lxd.DataSourceLXD(
+ sys_cfg={}, distro=mock.Mock(), paths=paths
+ )
+
+
+class TestGenerateFallbackNetworkConfig:
+
+ @pytest.mark.parametrize(
+ "uname_machine,systemd_detect_virt,network_mode,expected", (
+ # None for systemd_detect_virt returns None from which
+ ({}, None, "", NETWORK_V1),
+ ({}, None, "dhcp", NETWORK_V1),
+ # invalid network_mode logs warning
+ ({}, None, "bogus", NETWORK_V1),
+ ({}, None, "link-local", NETWORK_V1_MANUAL),
+ ("anything", "lxc\n", "", NETWORK_V1),
+ # `uname -m` on kvm determines devname
+ ("x86_64", "kvm\n", "", _add_network_v1_device("enp5s0")),
+ ("ppc64le", "kvm\n", "", _add_network_v1_device("enp0s5")),
+ ("s390x", "kvm\n", "", _add_network_v1_device("enc9"))
+ )
+ )
+ @mock.patch(DS_PATH + "util.system_info")
+ @mock.patch(DS_PATH + "subp.subp")
+ @mock.patch(DS_PATH + "subp.which")
+ def test_net_v2_based_on_network_mode_virt_type_and_uname_machine(
+ self,
+ m_which,
+ m_subp,
+ m_system_info,
+ uname_machine,
+ systemd_detect_virt,
+ network_mode,
+ expected,
+ caplog
+ ):
+ """Return network config v2 based on uname -m, systemd-detect-virt.
+
+ LXC config network_mode of "link-local" will determine whether to set
+ "activation-mode: manual", leaving the interface down.
+ """
+ if systemd_detect_virt is None:
+ m_which.return_value = None
+ m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]}
+ m_subp.return_value = (systemd_detect_virt, "")
+ assert expected == lxd.generate_fallback_network_config(
+ network_mode=network_mode
+ )
+ if systemd_detect_virt is None:
+ assert 0 == m_subp.call_count
+ assert 0 == m_system_info.call_count
+ else:
+ assert [
+ mock.call(["systemd-detect-virt"])
+ ] == m_subp.call_args_list
+ if systemd_detect_virt != "kvm\n":
+ assert 0 == m_system_info.call_count
+ else:
+ assert 1 == m_system_info.call_count
+ if network_mode not in ("dhcp", "", "link-local"):
+ assert "Ignoring unexpected value user.network_mode: {}".format(
+ network_mode
+ ) in caplog.text
+
+
+class TestDataSourceLXD:
+ def test_platform_info(self, lxd_ds):
+ assert "LXD" == lxd_ds.dsname
+ assert "lxd" == lxd_ds.cloud_name
+ assert "lxd" == lxd_ds.platform_type
+
+ def test_subplatform(self, lxd_ds):
+ assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == lxd_ds.subplatform
+
+ def test__get_data(self, lxd_ds):
+ """get_data calls read_metadata, setting appropiate instance attrs."""
+ assert UNSET == lxd_ds._crawled_metadata
+ assert UNSET == lxd_ds._network_config
+ assert None is lxd_ds.userdata_raw
+ assert True is lxd_ds._get_data()
+ assert LXD_V1_METADATA == lxd_ds._crawled_metadata
+ # network-config is dumped from YAML
+ assert NETWORK_V1 == lxd_ds._network_config
+ # Any user-data and vendor-data are saved as raw
+ assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw
+ assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw
+
+
+class TestIsPlatformViable:
+ @pytest.mark.parametrize(
+ "exists,lstat_mode,expected", (
+ (False, None, False),
+ (True, stat.S_IFREG, False),
+ (True, stat.S_IFSOCK, True),
+ )
+ )
+ @mock.patch(DS_PATH + "os.lstat")
+ @mock.patch(DS_PATH + "os.path.exists")
+ def test_expected_viable(
+ self, m_exists, m_lstat, exists, lstat_mode, expected
+ ):
+ """Return True only when LXD_SOCKET_PATH exists and is a socket."""
+ m_exists.return_value = exists
+ m_lstat.return_value = LStatResponse(lstat_mode)
+ assert expected is lxd.is_platform_viable()
+ m_exists.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+ if exists:
+ m_lstat.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+ else:
+ assert 0 == m_lstat.call_count
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index dcf33b9b..5f608cbb 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -694,7 +694,15 @@ class TestNonIscsiRoot_GetDataBehaviour:
assert oracle_ds._get_data()
assert [
- mock.call(m_find_fallback_nic.return_value)
+ mock.call(
+ iface=m_find_fallback_nic.return_value,
+ connectivity_url_data={
+ 'headers': {
+ 'Authorization': 'Bearer Oracle'
+ },
+ 'url': 'http://169.254.169.254/opc/v2/instance/'
+ }
+ )
] == m_EphemeralDHCPv4.call_args_list
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 9ccadf09..33679dcc 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -484,7 +484,13 @@ def parse_ssh_config_lines(lines):
try:
key, val = line.split(None, 1)
except ValueError:
- key, val = line.split('=', 1)
+ try:
+ key, val = line.split('=', 1)
+ except ValueError:
+ LOG.debug(
+ "sshd_config: option \"%s\" has no key/value pair,"
+ " skipping it", line)
+ continue
ret.append(SshdConfigLine(line, key, val))
return ret
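The nested try/except accepts both sshd_config separators and now tolerates a bare keyword. Pulled out as a standalone sketch (function name illustrative):

def split_sshd_option(line):
    # sshd_config allows "Key value" and "Key=value"; a lone token
    # carries no value and is skipped by the caller.
    try:
        key, val = line.split(None, 1)
    except ValueError:
        try:
            key, val = line.split('=', 1)
        except ValueError:
            return None
    return key, val

assert split_sshd_option("Port 22") == ("Port", "22")
assert split_sshd_option("Port=22") == ("Port", "22")
assert split_sshd_option("UsePAM") is None
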
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index bc164fa0..731b2982 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -9,7 +9,7 @@ import os
import pickle
import sys
from collections import namedtuple
-from typing import Dict, Set
+from typing import Dict, Set # noqa: F401
from cloudinit.settings import (
FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, PER_ONCE, RUN_CLOUD_CONFIG)
@@ -49,6 +49,54 @@ NULL_DATA_SOURCE = None
NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID"
+def update_event_enabled(
+ datasource: sources.DataSource,
+ cfg: dict,
+ event_source_type: EventType,
+ scope: EventScope = None
+) -> bool:
+ """Determine if a particular EventType is enabled.
+
+ For the `event_source_type` passed in, check whether this EventType
+ is enabled in the `updates` section of the userdata. If `updates`
+ is not enabled in userdata, check if defined as one of the
+ `default_events` on the datasource. `scope` may be used to
+ narrow the check to a particular `EventScope`.
+
+ Note that on first boot, userdata may NOT be available yet. In this
+ case, we only have the data source's `default_update_events`,
+ so an event that should be enabled in userdata may be denied.
+ """
+ default_events = datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501
+ user_events = userdata_to_events(cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501
+ # A value in the first will override a value in the second
+ allowed = util.mergemanydict([
+ copy.deepcopy(user_events),
+ copy.deepcopy(default_events),
+ ])
+ LOG.debug('Allowed events: %s', allowed)
+
+ if not scope:
+ scopes = allowed.keys()
+ else:
+ scopes = [scope]
+ scope_values = [s.value for s in scopes]
+
+ for evt_scope in scopes:
+ if event_source_type in allowed.get(evt_scope, []):
+ LOG.debug(
+ 'Event Allowed: scope=%s EventType=%s',
+ evt_scope.value, event_source_type
+ )
+ return True
+
+ LOG.debug(
+ 'Event Denied: scopes=%s EventType=%s',
+ scope_values, event_source_type
+ )
+ return False
+
+
class Init(object):
def __init__(self, ds_deps=None, reporter=None):
if ds_deps is not None:
@@ -715,46 +763,6 @@ class Init(object):
return (self.distro.generate_fallback_config(),
NetworkConfigSource.fallback)
- def update_event_enabled(
- self, event_source_type: EventType, scope: EventScope = None
- ) -> bool:
- """Determine if a particular EventType is enabled.
-
- For the `event_source_type` passed in, check whether this EventType
- is enabled in the `updates` section of the userdata. If `updates`
- is not enabled in userdata, check if defined as one of the
- `default_events` on the datasource. `scope` may be used to
- narrow the check to a particular `EventScope`.
-
- Note that on first boot, userdata may NOT be available yet. In this
- case, we only have the data source's `default_update_events`,
- so an event that should be enabled in userdata may be denied.
- """
- default_events = self.datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501
- user_events = userdata_to_events(self.cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501
- # A value in the first will override a value in the second
- allowed = util.mergemanydict([
- copy.deepcopy(user_events),
- copy.deepcopy(default_events),
- ])
- LOG.debug('Allowed events: %s', allowed)
-
- if not scope:
- scopes = allowed.keys()
- else:
- scopes = [scope]
- scope_values = [s.value for s in scopes]
-
- for evt_scope in scopes:
- if event_source_type in allowed.get(evt_scope, []):
- LOG.debug('Event Allowed: scope=%s EventType=%s',
- evt_scope.value, event_source_type)
- return True
-
- LOG.debug('Event Denied: scopes=%s EventType=%s',
- scope_values, event_source_type)
- return False
-
def _apply_netcfg_names(self, netcfg):
try:
LOG.debug("applying net config names for %s", netcfg)
@@ -784,8 +792,11 @@ class Init(object):
return
def event_enabled_and_metadata_updated(event_type):
- return self.update_event_enabled(
- event_type, scope=EventScope.NETWORK
+ return update_event_enabled(
+ datasource=self.datasource,
+ cfg=self.cfg,
+ event_source_type=event_type,
+ scope=EventScope.NETWORK
) and self.datasource.update_metadata_if_supported([event_type])
def should_run_on_boot_event():
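The scope/event merge the relocated helper performs can be pictured with toy stand-ins (the real EventScope/EventType enums live in cloudinit.event, and the real merge is util.mergemanydict over nested dicts):

import enum

class EventScope(enum.Enum):
    NETWORK = "network"

class EventType(enum.Enum):
    BOOT = "boot"
    HOTPLUG = "hotplug"

default_events = {EventScope.NETWORK: {EventType.BOOT}}     # datasource defaults
user_events = {EventScope.NETWORK: {EventType.HOTPLUG}}     # 'updates' userdata

# User config wins for any scope it defines, matching the
# "value in the first overrides the second" comment above.
allowed = {**default_events, **user_events}
print(EventType.HOTPLUG in allowed[EventScope.NETWORK])  # True
print(EventType.BOOT in allowed[EventScope.NETWORK])     # False
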
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index a00ade20..009bed32 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -36,14 +36,14 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I)
BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
-MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'
+MISSING_JINJA_PREFIX = 'CI_MISSING_JINJA_VAR/'
class UndefinedJinjaVariable(JUndefined):
"""Class used to represent any undefined jinja template variable."""
def __str__(self):
- return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name)
+ return '%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name)
def __sub__(self, other):
other = str(other).replace(MISSING_JINJA_PREFIX, '')
diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py
index 911c1f3d..515d5d64 100644
--- a/cloudinit/tests/test_subp.py
+++ b/cloudinit/tests/test_subp.py
@@ -91,8 +91,8 @@ class TestSubp(CiTestCase):
tmp_file = self.tmp_path('test.out')
cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
(out, _err) = subp.subp(cmd.encode('utf-8'), shell=True)
- self.assertEqual(u'', out)
- self.assertEqual(u'', _err)
+ self.assertEqual('', out)
+ self.assertEqual('', _err)
self.assertEqual('HI MOM\n', util.load_file(tmp_file))
def test_subp_handles_strings(self):
@@ -100,8 +100,8 @@ class TestSubp(CiTestCase):
tmp_file = self.tmp_path('test.out')
cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
(out, _err) = subp.subp(cmd, shell=True)
- self.assertEqual(u'', out)
- self.assertEqual(u'', _err)
+ self.assertEqual('', out)
+ self.assertEqual('', _err)
self.assertEqual('HI MOM\n', util.load_file(tmp_file))
def test_subp_handles_utf8(self):
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 9dd01158..ab5eb35c 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -186,6 +186,29 @@ OS_RELEASE_VIRTUOZZO_8 = dedent("""\
BUG_REPORT_URL="https://bugs.openvz.org"
""")
+OS_RELEASE_CLOUDLINUX_8 = dedent("""\
+ NAME="CloudLinux"
+ VERSION="8.4 (Valery Rozhdestvensky)"
+ ID="cloudlinux"
+ ID_LIKE="rhel fedora centos"
+ VERSION_ID="8.4"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server"
+ HOME_URL="https://www.cloudlinux.com/"
+ BUG_REPORT_URL="https://www.cloudlinux.com/support"
+""")
+
+OS_RELEASE_OPENEULER_20 = dedent("""\
+ NAME="openEuler"
+ VERSION="20.03 (LTS-SP2)"
+ ID="openEuler"
+ VERSION_ID="20.03"
+ PRETTY_NAME="openEuler 20.03 (LTS-SP2)"
+ ANSI_COLOR="0;31"
+""")
+
REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
REDHAT_RELEASE_REDHAT_6 = (
@@ -200,7 +223,8 @@ REDHAT_RELEASE_ROCKY_8 = (
"Rocky Linux release 8.3 (Green Obsidian)")
REDHAT_RELEASE_VIRTUOZZO_8 = (
"Virtuozzo Linux release 8")
-
+REDHAT_RELEASE_CLOUDLINUX_8 = (
+ "CloudLinux release 8.4 (Valery Rozhdestvensky)")
OS_RELEASE_DEBIAN = dedent("""\
PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
NAME="Debian GNU/Linux"
@@ -325,6 +349,11 @@ class TestShellify(CiTestCase):
util.shellify(["echo hi mom", ["echo", "hi dad"],
('echo', 'hi', 'sis')]))
+ def test_supports_comments(self):
+ self.assertEqual(
+ '\n'.join(["#!/bin/sh", "echo start", "echo end", ""]),
+ util.shellify(["echo start", None, "echo end"]))
+
class TestGetHostnameFqdn(CiTestCase):
@@ -680,6 +709,22 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
@mock.patch('cloudinit.util.load_file')
+ def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify cloudlinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists):
+ """Verify cloudlinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
def test_get_linux_debian(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on Debian."""
m_os_release.return_value = OS_RELEASE_DEBIAN
@@ -688,6 +733,14 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('debian', '9', 'stretch'), dist)
@mock.patch('cloudinit.util.load_file')
+ def test_get_linux_openeuler(self, m_os_release, m_path_exists):
+ """Verify get the correct name and release name on Openeuler."""
+ m_os_release.return_value = OS_RELEASE_OPENEULER_20
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('openEuler', '20.03', 'LTS-SP2'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
def test_get_linux_opensuse(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on openSUSE
prior to openSUSE Leap 15.
diff --git a/cloudinit/util.py b/cloudinit/util.py
index c53f6453..575a1fef 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -297,7 +297,7 @@ def uniq_merge(*lists):
if isinstance(a_list, str):
a_list = a_list.strip().split(",")
# Kickout the empty ones
- a_list = [a for a in a_list if len(a)]
+ a_list = [a for a in a_list if a]
combined_list.extend(a_list)
return uniq_list(combined_list)
@@ -548,8 +548,9 @@ def system_info():
if system == "linux":
linux_dist = info['dist'][0].lower()
if linux_dist in (
- 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'eurolinux',
- 'fedora', 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'):
+ 'almalinux', 'alpine', 'arch', 'centos', 'cloudlinux',
+ 'debian', 'eurolinux', 'fedora', 'openEuler', 'photon',
+ 'rhel', 'rocky', 'suse', 'virtuozzo'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
@@ -1879,6 +1880,20 @@ def chmod(path, mode):
os.chmod(path, real_mode)
+def get_group_id(grp_name: str) -> int:
+ """
+ Returns the group id of a group name, or -1 if no group exists
+
+ @param grp_name: the name of the group
+ """
+ gid = -1
+ try:
+ gid = grp.getgrnam(grp_name).gr_gid
+ except KeyError:
+ LOG.debug("Group %s is not a valid group name", grp_name)
+ return gid
+
+
def get_permissions(path: str) -> int:
"""
Returns the octal permissions of the file/folder pointed by the path,
@@ -2030,6 +2045,9 @@ def shellify(cmdlist, add_header=True):
elif isinstance(args, str):
content = "%s%s\n" % (content, args)
cmds_made += 1
+ # Yaml parsing of a comment results in None
+ elif args is None:
+ pass
else:
raise TypeError(
"Unable to shellify type '%s'. Expected list, string, tuple. "
diff --git a/cloudinit/version.py b/cloudinit/version.py
index b798a6d7..ab93f902 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "21.3"
+__VERSION__ = "21.4"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 825deff4..b66bbe60 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -32,8 +32,8 @@ disable_root: false
disable_root: true
{% endif %}
-{% if variant in ["almalinux", "alpine", "amazon", "centos", "eurolinux",
- "fedora", "rhel", "rocky", "virtuozzo"] %}
+{% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux",
+ "fedora", "openEuler", "rhel", "rocky", "virtuozzo"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% if variant == "amazon" %}
resize_rootfs: noblock
@@ -151,6 +151,7 @@ cloud_final_modules:
{% if variant in ["ubuntu", "unknown"] %}
- ubuntu-drivers
{% endif %}
+ - write-files-deferred
- puppet
- chef
- mcollective
@@ -165,6 +166,7 @@ cloud_final_modules:
- scripts-user
- ssh-authkey-fingerprints
- keys-to-console
+ - install-hotplug
- phone-home
- final-message
- power-state-change
@@ -173,8 +175,8 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
-{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
- "eurolinux", "fedora", "freebsd", "netbsd", "openbsd",
+{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian",
+ "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "openEuler",
"photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %}
distro: {{ variant }}
{% elif variant in ["dragonfly"] %}
@@ -228,8 +230,8 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "eurolinux",
- "fedora", "rhel", "rocky", "suse", "virtuozzo"] %}
+{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "eurolinux",
+ "fedora", "openEuler", "rhel", "rocky", "suse", "virtuozzo"] %}
# Default user name + that default users groups (if added/used)
default_user:
{% if variant == "amazon" %}
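
For context, rendering the template above for one of the newly recognized
variants produces a cloud.cfg containing, among other things (a sketch based
only on the hunks shown; the rest of the template is omitted):

.. code-block:: yaml

    # Sketch: fragments of /etc/cloud/cloud.cfg rendered with variant=openEuler
    mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
    system_info:
      # This will affect which distro class gets used
      distro: openEuler
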
diff --git a/debian/changelog b/debian/changelog
index 44ff03c4..749b04a1 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,114 @@
+cloud-init (21.4-0ubuntu1~22.04.1) jammy; urgency=medium
+
+ * d/upstream/metadata: Change contact to James Falcon
+ * d/cloud-init.templates: Add LXD to default datasource_list with
+ translations
+ * drop the following cherry-picks now included:
+ + cpick-28e56d99-Azure-Retry-dhcp-on-timeouts-when-polling
+ + cpick-e69a8874-Set-Azure-to-only-update-metadata-on-BOOT_NEW_INSTANCE
+ + cpick-612e3908-Add-connectivity_url-to-Oracle-s-EphemeralDHCPv4-988
+ + cpick-dc227869-Set-Azure-to-apply-networking-config-every-BOOT-1023
+ + cpick-9c147e83-Allow-disabling-of-network-activation-SC-307-1048
+ * New upstream release.
+ - Release 21.4 (#1091) (LP: #1949405)
+ - Azure: fallback nic needs to be reevaluated during reprovisioning
+ (#1094) [Anh Vo]
+ - azure: pps imds (#1093) [Anh Vo]
+ - testing: Remove calls to 'install_new_cloud_init' (#1092)
+ - Add LXD datasource (#1040)
+ - Fix unhandled apt_configure case. (#1065) [Brett Holman]
+ - Allow libexec for hotplug (#1088)
+ - Add necessary mocks to test_ovf unit tests (#1087)
+ - Remove (deprecated) apt-key (#1068) [Brett Holman] (LP: #1836336)
+ - distros: Remove a completed "TODO" comment (#1086)
+ - cc_ssh.py: Add configuration for controlling ssh-keygen output (#1083)
+ [dermotbradley]
+ - Add "install hotplug" module (SC-476) (#1069) (LP: #1946003)
+ - hosts.alpine.tmpl: rearrange the order of short and long hostnames
+ (#1084) [dermotbradley]
+ - Add max version to docutils
+ - cloudinit/dmi.py: Change warning to debug to prevent console display
+ (#1082) [dermotbradley]
+ - remove unnecessary EOF string in
+ disable-sshd-keygen-if-cloud-init-active.conf (#1075) [Emanuele
+ Giuseppe Esposito]
+ - Add module 'write-files-deferred' executed in stage 'final' (#916)
+ [Lucendio]
+ - Bump pycloudlib to fix CI (#1080)
+ - Remove pin in dependencies for jsonschema (#1078)
+ - Add "Google" as possible system-product-name (#1077) [vteratipally]
+ - Update Debian security suite for bullseye (#1076) [Johann Queuniet]
+ - Leave the details of service management to the distro (#1074)
+ [Andy Fiddaman]
+ - Fix typos in setup.py (#1059) [Christian Clauss]
+ - Update Azure _unpickle (SC-500) (#1067) (LP: #1946644)
+ - cc_ssh.py: fix private key group owner and permissions (#1070)
+ [Emanuele Giuseppe Esposito]
+ - VMware: read network-config from ISO (#1066) [Thomas Weißschuh]
+ - testing: mock sleep in gce unit tests (#1072)
+ - CloudStack: fix data-server DNS resolution (#1004)
+ [Olivier Lemasle] (LP: #1942232)
+ - Fix unit test broken by pyyaml upgrade (#1071)
+ - testing: add get_cloud function (SC-461) (#1038)
+ - Inhibit sshd-keygen@.service if cloud-init is active (#1028)
+ [Ryan Harper]
+ - VMWARE: search the deployPkg plugin in multiarch dir (#1061)
+ [xiaofengw-vmware] (LP: #1944946)
+ - Fix set-name/interface DNS bug (#1058) [Andrew Kutz] (LP: #1946493)
+ - Use specified tmp location for growpart (#1046) [jshen28]
+ - .gitignore: ignore tags file for ctags users (#1057) [Brett Holman]
+ - Allow comments in runcmd and report failed commands correctly (#1049)
+ [Brett Holman] (LP: #1853146)
+ - tox integration: pass the *_proxy, GOOGLE_*, GCP_* env vars (#1050)
+ [Paride Legovini]
+ - Allow disabling of network activation (SC-307) (#1048) (LP: #1938299)
+ - renderer: convert relative imports to absolute (#1052) [Paride Legovini]
+ - Support ETHx_IP6_GATEWAY, SET_HOSTNAME on OpenNebula (#1045)
+ [Vlastimil Holer]
+ - integration-requirements: bump the pycloudlib commit (#1047)
+ [Paride Legovini]
+ - Allow Vultr to set MTU and use as-is configs (#1037) [eb3095]
+ - pin jsonschema in requirements.txt (#1043)
+ - testing: remove cloud_tests (#1020)
+ - Add andgein as contributor (#1042) [Andrew Gein]
+ - Make wording for module frequency consistent (#1039) [Nicolas Bock]
+ - Use ascii code for growpart (#1036) [jshen28]
+ - Add jshen28 as contributor (#1035) [jshen28]
+ - Skip test_cache_purged_on_version_change on Azure (#1033)
+ - Remove invalid ssh_import_id from examples (#1031)
+ - Cleanup Vultr support (#987) [eb3095]
+ - docs: update cc_disk_setup for fs to raw disk (#1017)
+ - HACKING.rst: change contact info to James Falcon (#1030)
+ - tox: bump the pinned flake8 and pylint version (#1029)
+ [Paride Legovini] (LP: #1944414)
+ - Add retries to DataSourceGCE.py when connecting to GCE (#1005)
+ [vteratipally]
+ - Set Azure to apply networking config every BOOT (#1023)
+ - Add connectivity_url to Oracle's EphemeralDHCPv4 (#988) (LP: #1939603)
+ - docs: fix typo and include sudo for report bugs commands (#1022)
+ [Renan Rodrigo] (LP: #1940236)
+ - VMware: Fix typo introduced in #947 and add test (#1019) [PengpengSun]
+ - Update IPv6 entries in /etc/hosts (#1021) [Richard Hansen] (LP: #1943798)
+ - Integration test upgrades for the 21.3-1 SRU (#1001)
+ - Add Jille to tools/.github-cla-signers (#1016) [Jille Timmermans]
+ - Improve ug_util.py (#1013) [Shreenidhi Shedi]
+ - Support openEuler OS (#1012) [zhuzaifangxuele]
+ - ssh_utils.py: ignore when sshd_config options are not key/value pairs
+ (#1007) [Emanuele Giuseppe Esposito]
+ - Set Azure to only update metadata on BOOT_NEW_INSTANCE (#1006)
+ - cc_update_etc_hosts: Use the distribution-defined path for the hosts
+ file (#983) [Andy Fiddaman]
+ - Add CloudLinux OS support (#1003) [Alexandr Kravchenko]
+ - puppet config: add the start_agent option (#1002) [Andrew Bogott]
+ - Fix `make style-check` errors (#1000) [Shreenidhi Shedi]
+ - Make cloud-id copyright year (#991) [Andrii Podanenko]
+ - Add support to accept-ra in networkd renderer (#999) [Shreenidhi Shedi]
+ - Update ds-identify to pass shellcheck (#979) [Andrew Kutz]
+ - Azure: Retry dhcp on timeouts when polling reprovisiondata (#998)
+ [aswinrajamannar]
+
+ -- James Falcon <james.falcon@canonical.com> Tue, 02 Nov 2021 18:07:49 -0500
+
cloud-init (21.3-1-g6803368d-0ubuntu3) impish; urgency=medium
* cherry-pick 9c147e83: Allow disabling of network activation (SC-307)
diff --git a/debian/cloud-init.templates b/debian/cloud-init.templates
index 75ac07b8..6e131cec 100644
--- a/debian/cloud-init.templates
+++ b/debian/cloud-init.templates
@@ -1,8 +1,8 @@
Template: cloud-init/datasources
Type: multiselect
-Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, None
-Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, None
-__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, VMware: reads data from guestinfo table or env vars, Vultr: Vultr Cloud, None: Failsafe datasource
+Default: NoCloud, ConfigDrive, LXD, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, None
+Choices-C: NoCloud, ConfigDrive, LXD, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, None
+__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, LXD: Reads /dev/lxd/sock representation of instance data, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, VMware: reads data from guestinfo table or env vars, Vultr: Vultr Cloud, None: Failsafe datasource
_Description: Which data sources should be searched?
Cloud-init supports searching different "Data Sources" for information
that it uses to configure a cloud instance.
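
Since LXD now appears in the default datasource list, preseeded installs that
answer this debconf question should include it as well. A hedged example using
the standard debconf tooling (the question name ``cloud-init/datasources``
comes from the template above; the chosen value is illustrative):

.. code-block:: shell-session

    $ echo 'cloud-init cloud-init/datasources multiselect NoCloud, LXD, None' \
        | sudo debconf-set-selections
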
diff --git a/debian/patches/cpick-28e56d99-Azure-Retry-dhcp-on-timeouts-when-polling b/debian/patches/cpick-28e56d99-Azure-Retry-dhcp-on-timeouts-when-polling
deleted file mode 100644
index db914b45..00000000
--- a/debian/patches/cpick-28e56d99-Azure-Retry-dhcp-on-timeouts-when-polling
+++ /dev/null
@@ -1,73 +0,0 @@
-From 28e56d993fc40feab139f149dacc10cae51a3fe0 Mon Sep 17 00:00:00 2001
-From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com>
-Date: Tue, 24 Aug 2021 13:45:41 -0700
-Subject: [PATCH] Azure: Retry dhcp on timeouts when polling reprovisiondata
- (#998)
-
-In the nic attach path, we skip doing dhcp since we already did it
-when bringing the interface up. However when polling for
-reprovisiondata, it is possible for the request to time out due to
-platform issues. In those cases we still need to do dhcp and try again
-since we tear down the context. We can only skip the first dhcp
-attempt.
----
- cloudinit/sources/DataSourceAzure.py | 4 +++
- tests/unittests/test_datasource/test_azure.py | 34 +++++++++++++++++++
- 2 files changed, 38 insertions(+)
-
---- a/cloudinit/sources/DataSourceAzure.py
-+++ b/cloudinit/sources/DataSourceAzure.py
-@@ -1317,6 +1317,10 @@ class DataSourceAzure(sources.DataSource
- except UrlError:
- # Teardown our EphemeralDHCPv4 context on failure as we retry
- self._ephemeral_dhcp_ctx.clean_network()
-+
-+ # Also reset this flag which determines if we should do dhcp
-+ # during retries.
-+ is_ephemeral_ctx_present = False
- finally:
- if nl_sock:
- nl_sock.close()
---- a/tests/unittests/test_datasource/test_azure.py
-+++ b/tests/unittests/test_datasource/test_azure.py
-@@ -3055,6 +3055,40 @@ class TestPreprovisioningPollIMDS(CiTest
- self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual(0, m_media_switch.call_count)
-
-+ @mock.patch('os.path.isfile')
-+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-+ def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
-+ self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request,
-+ m_media_switch, m_dhcp, m_net):
-+ """The poll_imds function should reuse the dhcp ctx if it is already
-+ present. This happens when we wait for nic to be hot-attached before
-+ polling for reprovisiondata. Note that if this ctx is set when
-+ _poll_imds is called, then it is not expected to be waiting for
-+ media_disconnect_connect either."""
-+
-+ tries = 0
-+
-+ def fake_timeout_once(**kwargs):
-+ nonlocal tries
-+ tries += 1
-+ if tries == 1:
-+ raise requests.Timeout('Fake connection timeout')
-+ return mock.MagicMock(status_code=200, text="good", content="good")
-+
-+ m_request.side_effect = fake_timeout_once
-+ report_file = self.tmp_path('report_marker', self.tmp)
-+ m_isfile.return_value = True
-+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-+ with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\
-+ mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx:
-+ m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
-+ dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
-+ dsa._poll_imds()
-+ self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
-+ self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
-+ self.assertEqual(0, m_media_switch.call_count)
-+ self.assertEqual(2, m_request.call_count)
-+
- def test_does_not_poll_imds_report_ready_when_marker_file_exists(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
- """poll_imds should not call report ready when the reported ready
diff --git a/debian/patches/cpick-612e3908-Add-connectivity_url-to-Oracle-s-EphemeralDHCPv4-988 b/debian/patches/cpick-612e3908-Add-connectivity_url-to-Oracle-s-EphemeralDHCPv4-988
deleted file mode 100644
index f3492fc3..00000000
--- a/debian/patches/cpick-612e3908-Add-connectivity_url-to-Oracle-s-EphemeralDHCPv4-988
+++ /dev/null
@@ -1,322 +0,0 @@
-From 612e39087aee3b1242765e7c4f463f54a6ebd723 Mon Sep 17 00:00:00 2001
-From: James Falcon <therealfalcon@gmail.com>
-Date: Fri, 17 Sep 2021 13:04:07 -0500
-Subject: [PATCH] Add connectivity_url to Oracle's EphemeralDHCPv4 (#988)
-
-Add connectivity_url to Oracle's EphemeralDHCPv4
-
-On bionic, when trying to bring up the EphemeralDHCPv4, it's possible
-that we already have a route defined, which will result in an error when
-trying to add the DHCP route. Use the connectivity_url to check if we
-can reach the metadata service, and if so, skip the EphemeralDHCPv4.
-
-The has_url_connectivity function has also been modified to take
-a dict of kwargs to send to readurl.
-
-LP: #1939603
----
- cloudinit/net/__init__.py | 37 +++++++++++++++++++-------
- cloudinit/net/dhcp.py | 20 +++++++++-----
- cloudinit/net/tests/test_dhcp.py | 8 ++++--
- cloudinit/net/tests/test_init.py | 20 +++++++++-----
- cloudinit/sources/DataSourceOracle.py | 13 ++++++---
- cloudinit/sources/helpers/vultr.py | 2 +-
- cloudinit/sources/tests/test_oracle.py | 10 ++++++-
- 7 files changed, 78 insertions(+), 32 deletions(-)
-
---- a/cloudinit/net/__init__.py
-+++ b/cloudinit/net/__init__.py
-@@ -11,6 +11,7 @@ import ipaddress
- import logging
- import os
- import re
-+from typing import Any, Dict
-
- from cloudinit import subp
- from cloudinit import util
-@@ -971,18 +972,33 @@ def get_ib_hwaddrs_by_interface():
- return ret
-
-
--def has_url_connectivity(url):
-- """Return true when the instance has access to the provided URL
-+def has_url_connectivity(url_data: Dict[str, Any]) -> bool:
-+ """Return true when the instance has access to the provided URL.
-
- Logs a warning if url is not the expected format.
-+
-+ url_data is a dictionary of kwargs to send to readurl. E.g.:
-+
-+ has_url_connectivity({
-+ "url": "http://example.invalid",
-+ "headers": {"some": "header"},
-+ "timeout": 10
-+ })
- """
-+ if 'url' not in url_data:
-+ LOG.warning(
-+ "Ignoring connectivity check. No 'url' to check in %s", url_data)
-+ return False
-+ url = url_data['url']
- if not any([url.startswith('http://'), url.startswith('https://')]):
- LOG.warning(
- "Ignoring connectivity check. Expected URL beginning with http*://"
- " received '%s'", url)
- return False
-+ if 'timeout' not in url_data:
-+ url_data['timeout'] = 5
- try:
-- readurl(url, timeout=5)
-+ readurl(**url_data)
- except UrlError:
- return False
- return True
-@@ -1025,14 +1041,15 @@ class EphemeralIPv4Network(object):
-
- No operations are performed if the provided interface already has the
- specified configuration.
-- This can be verified with the connectivity_url.
-+ This can be verified with the connectivity_url_data.
- If unconnected, bring up the interface with valid ip, prefix and broadcast.
- If router is provided setup a default route for that interface. Upon
- context exit, clean up the interface leaving no configuration behind.
- """
-
- def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None,
-- connectivity_url=None, static_routes=None):
-+ connectivity_url_data: Dict[str, Any] = None,
-+ static_routes=None):
- """Setup context manager and validate call signature.
-
- @param interface: Name of the network interface to bring up.
-@@ -1041,7 +1058,7 @@ class EphemeralIPv4Network(object):
- prefix.
- @param broadcast: Broadcast address for the IPv4 network.
- @param router: Optionally the default gateway IP.
-- @param connectivity_url: Optionally, a URL to verify if a usable
-+ @param connectivity_url_data: Optionally, a URL to verify if a usable
- connection already exists.
- @param static_routes: Optionally a list of static routes from DHCP
- """
-@@ -1056,7 +1073,7 @@ class EphemeralIPv4Network(object):
- 'Cannot setup network: {0}'.format(e)
- ) from e
-
-- self.connectivity_url = connectivity_url
-+ self.connectivity_url_data = connectivity_url_data
- self.interface = interface
- self.ip = ip
- self.broadcast = broadcast
-@@ -1066,11 +1083,11 @@ class EphemeralIPv4Network(object):
-
- def __enter__(self):
- """Perform ephemeral network setup if interface is not connected."""
-- if self.connectivity_url:
-- if has_url_connectivity(self.connectivity_url):
-+ if self.connectivity_url_data:
-+ if has_url_connectivity(self.connectivity_url_data):
- LOG.debug(
- 'Skip ephemeral network setup, instance has connectivity'
-- ' to %s', self.connectivity_url)
-+ ' to %s', self.connectivity_url_data['url'])
- return
-
- self._bringup_device()
---- a/cloudinit/net/dhcp.py
-+++ b/cloudinit/net/dhcp.py
-@@ -4,6 +4,7 @@
- #
- # This file is part of cloud-init. See LICENSE file for license information.
-
-+from typing import Dict, Any
- import configobj
- import logging
- import os
-@@ -38,21 +39,26 @@ class NoDHCPLeaseError(Exception):
-
-
- class EphemeralDHCPv4(object):
-- def __init__(self, iface=None, connectivity_url=None, dhcp_log_func=None):
-+ def __init__(
-+ self,
-+ iface=None,
-+ connectivity_url_data: Dict[str, Any] = None,
-+ dhcp_log_func=None
-+ ):
- self.iface = iface
- self._ephipv4 = None
- self.lease = None
- self.dhcp_log_func = dhcp_log_func
-- self.connectivity_url = connectivity_url
-+ self.connectivity_url_data = connectivity_url_data
-
- def __enter__(self):
- """Setup sandboxed dhcp context, unless connectivity_url can already be
- reached."""
-- if self.connectivity_url:
-- if has_url_connectivity(self.connectivity_url):
-+ if self.connectivity_url_data:
-+ if has_url_connectivity(self.connectivity_url_data):
- LOG.debug(
- 'Skip ephemeral DHCP setup, instance has connectivity'
-- ' to %s', self.connectivity_url)
-+ ' to %s', self.connectivity_url_data)
- return
- return self.obtain_lease()
-
-@@ -104,8 +110,8 @@ class EphemeralDHCPv4(object):
- if kwargs['static_routes']:
- kwargs['static_routes'] = (
- parse_static_routes(kwargs['static_routes']))
-- if self.connectivity_url:
-- kwargs['connectivity_url'] = self.connectivity_url
-+ if self.connectivity_url_data:
-+ kwargs['connectivity_url_data'] = self.connectivity_url_data
- ephipv4 = EphemeralIPv4Network(**kwargs)
- ephipv4.__enter__()
- self._ephipv4 = ephipv4
---- a/cloudinit/net/tests/test_dhcp.py
-+++ b/cloudinit/net/tests/test_dhcp.py
-@@ -617,7 +617,9 @@ class TestEphemeralDhcpNoNetworkSetup(Ht
- url = 'http://example.org/index.html'
-
- httpretty.register_uri(httpretty.GET, url)
-- with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease:
-+ with net.dhcp.EphemeralDHCPv4(
-+ connectivity_url_data={'url': url},
-+ ) as lease:
- self.assertIsNone(lease)
- # Ensure that no teardown happens:
- m_dhcp.assert_not_called()
-@@ -635,7 +637,9 @@ class TestEphemeralDhcpNoNetworkSetup(Ht
- m_subp.return_value = ('', '')
-
- httpretty.register_uri(httpretty.GET, url, body={}, status=404)
-- with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease:
-+ with net.dhcp.EphemeralDHCPv4(
-+ connectivity_url_data={'url': url},
-+ ) as lease:
- self.assertEqual(fake_lease, lease)
- # Ensure that dhcp discovery occurs
- m_dhcp.called_once_with()
---- a/cloudinit/net/tests/test_init.py
-+++ b/cloudinit/net/tests/test_init.py
-@@ -622,11 +622,14 @@ class TestEphemeralIPV4Network(CiTestCas
- params = {
- 'interface': 'eth0', 'ip': '192.168.2.2',
- 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
-- 'connectivity_url': 'http://example.org/index.html'}
-+ 'connectivity_url_data': {'url': 'http://example.org/index.html'}
-+ }
-
- with net.EphemeralIPv4Network(**params):
-- self.assertEqual([mock.call('http://example.org/index.html',
-- timeout=5)], m_readurl.call_args_list)
-+ self.assertEqual(
-+ [mock.call(url='http://example.org/index.html', timeout=5)],
-+ m_readurl.call_args_list
-+ )
- # Ensure that no teardown happens:
- m_subp.assert_has_calls([])
-
-@@ -850,25 +853,28 @@ class TestHasURLConnectivity(HttprettyTe
- def test_url_timeout_on_connectivity_check(self, m_readurl):
- """A timeout of 5 seconds is provided when reading a url."""
- self.assertTrue(
-- net.has_url_connectivity(self.url), 'Expected True on url connect')
-+ net.has_url_connectivity({'url': self.url}),
-+ 'Expected True on url connect')
-
- def test_true_on_url_connectivity_success(self):
- httpretty.register_uri(httpretty.GET, self.url)
- self.assertTrue(
-- net.has_url_connectivity(self.url), 'Expected True on url connect')
-+ net.has_url_connectivity({'url': self.url}),
-+ 'Expected True on url connect')
-
- @mock.patch('requests.Session.request')
- def test_true_on_url_connectivity_timeout(self, m_request):
- """A timeout raised accessing the url will return False."""
- m_request.side_effect = requests.Timeout('Fake Connection Timeout')
- self.assertFalse(
-- net.has_url_connectivity(self.url),
-+ net.has_url_connectivity({'url': self.url}),
- 'Expected False on url timeout')
-
- def test_true_on_url_connectivity_failure(self):
- httpretty.register_uri(httpretty.GET, self.url, body={}, status=404)
- self.assertFalse(
-- net.has_url_connectivity(self.url), 'Expected False on url fail')
-+ net.has_url_connectivity({'url': self.url}),
-+ 'Expected False on url fail')
-
-
- def _mk_v1_phys(mac, name, driver, device_id):
---- a/cloudinit/sources/DataSourceOracle.py
-+++ b/cloudinit/sources/DataSourceOracle.py
-@@ -40,6 +40,7 @@ METADATA_PATTERN = METADATA_ROOT + "{pat
- # https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
- # indicates that an MTU of 9000 is used within OCI
- MTU = 9000
-+V2_HEADERS = {"Authorization": "Bearer Oracle"}
-
- OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data")
-
-@@ -134,7 +135,13 @@ class DataSourceOracle(sources.DataSourc
- )
- network_context = noop()
- if not _is_iscsi_root():
-- network_context = dhcp.EphemeralDHCPv4(net.find_fallback_nic())
-+ network_context = dhcp.EphemeralDHCPv4(
-+ iface=net.find_fallback_nic(),
-+ connectivity_url_data={
-+ "url": METADATA_PATTERN.format(version=2, path="instance"),
-+ "headers": V2_HEADERS,
-+ }
-+ )
- with network_context:
- fetched_metadata = read_opc_metadata(
- fetch_vnics_data=fetch_vnics_data
-@@ -304,11 +311,9 @@ def read_opc_metadata(*, fetch_vnics_dat
- retries = 2
-
- def _fetch(metadata_version: int, path: str) -> dict:
-- headers = {
-- "Authorization": "Bearer Oracle"} if metadata_version > 1 else None
- return readurl(
- url=METADATA_PATTERN.format(version=metadata_version, path=path),
-- headers=headers,
-+ headers=V2_HEADERS if metadata_version > 1 else None,
- retries=retries,
- )._response.json()
-
---- a/cloudinit/sources/helpers/vultr.py
-+++ b/cloudinit/sources/helpers/vultr.py
-@@ -20,7 +20,7 @@ LOG = log.getLogger(__name__)
- def get_metadata(url, timeout, retries, sec_between):
- # Bring up interface
- try:
-- with EphemeralDHCPv4(connectivity_url=url):
-+ with EphemeralDHCPv4(connectivity_url_data={"url": url}):
- # Fetch the metadata
- v1 = read_metadata(url, timeout, retries, sec_between)
- except (NoDHCPLeaseError) as exc:
---- a/cloudinit/sources/tests/test_oracle.py
-+++ b/cloudinit/sources/tests/test_oracle.py
-@@ -694,7 +694,15 @@ class TestNonIscsiRoot_GetDataBehaviour:
- assert oracle_ds._get_data()
-
- assert [
-- mock.call(m_find_fallback_nic.return_value)
-+ mock.call(
-+ iface=m_find_fallback_nic.return_value,
-+ connectivity_url_data={
-+ 'headers': {
-+ 'Authorization': 'Bearer Oracle'
-+ },
-+ 'url': 'http://169.254.169.254/opc/v2/instance/'
-+ }
-+ )
- ] == m_EphemeralDHCPv4.call_args_list
-
-
diff --git a/debian/patches/cpick-9c147e83-Allow-disabling-of-network-activation-SC-307-1048 b/debian/patches/cpick-9c147e83-Allow-disabling-of-network-activation-SC-307-1048
deleted file mode 100644
index e33ca16f..00000000
--- a/debian/patches/cpick-9c147e83-Allow-disabling-of-network-activation-SC-307-1048
+++ /dev/null
@@ -1,190 +0,0 @@
-From 9c147e8341e287366790e60658f646cdcc59bef2 Mon Sep 17 00:00:00 2001
-From: James Falcon <james.falcon@canonical.com>
-Date: Thu, 7 Oct 2021 11:27:36 -0500
-Subject: [PATCH] Allow disabling of network activation (SC-307) (#1048)
-
-In #919 (81299de), we refactored some of the code used to bring up
-networks across distros. Previously, the call to bring up network
-interfaces during 'init' stage unintentionally resulted in a no-op
-such that network interfaces were NEVER brought up by cloud-init, even
-if new network interfaces were found after crawling the metadata.
-
-The code was altered to bring up these discovered network interfaces.
-On ubuntu, this results in a 'netplan apply' call during 'init' stage
-for any ubuntu-based distro on a datasource that has a NETWORK
-dependency. On GCE, this additional 'netplan apply' conflicts with the
-google-guest-agent service, resulting in an instance that can no
-longer be connected to.
-
-This commit adds a 'disable_network_activation' option that can be
-enabled in /etc/cloud.cfg to disable the activation of network
-interfaces in 'init' stage.
-
-LP: #1938299
----
- cloudinit/cmd/main.py | 11 ++++-
- cloudinit/cmd/tests/test_main.py | 23 ++++++++++
- cloudinit/distros/__init__.py | 3 ++
- doc/rtd/topics/network-config.rst | 11 +++++
- .../datasources/test_network_dependency.py | 43 +++++++++++++++++++
- 5 files changed, 89 insertions(+), 2 deletions(-)
- create mode 100644 tests/integration_tests/datasources/test_network_dependency.py
-
---- a/cloudinit/cmd/main.py
-+++ b/cloudinit/cmd/main.py
-@@ -239,6 +239,12 @@ def purge_cache_on_python_version_change
- util.write_file(python_version_path, current_python_version)
-
-
-+def _should_bring_up_interfaces(init, args):
-+ if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'):
-+ return False
-+ return not args.local
-+
-+
- def main_init(name, args):
- deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
- if args.local:
-@@ -348,6 +354,7 @@ def main_init(name, args):
- util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
-
- # Stage 5
-+ bring_up_interfaces = _should_bring_up_interfaces(init, args)
- try:
- init.fetch(existing=existing)
- # if in network mode, and the datasource is local
-@@ -367,7 +374,7 @@ def main_init(name, args):
- util.logexc(LOG, ("No instance datasource found!"
- " Likely bad things to come!"))
- if not args.force:
-- init.apply_network_config(bring_up=not args.local)
-+ init.apply_network_config(bring_up=bring_up_interfaces)
- LOG.debug("[%s] Exiting without datasource", mode)
- if mode == sources.DSMODE_LOCAL:
- return (None, [])
-@@ -388,7 +395,7 @@ def main_init(name, args):
- # dhcp clients to advertize this hostname to any DDNS services
- # LP: #1746455.
- _maybe_set_hostname(init, stage='local', retry_stage='network')
-- init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
-+ init.apply_network_config(bring_up=bring_up_interfaces)
-
- if mode == sources.DSMODE_LOCAL:
- if init.datasource.dsmode != mode:
---- a/cloudinit/cmd/tests/test_main.py
-+++ b/cloudinit/cmd/tests/test_main.py
-@@ -4,6 +4,9 @@ from collections import namedtuple
- import copy
- import os
- from io import StringIO
-+from unittest import mock
-+
-+import pytest
-
- from cloudinit.cmd import main
- from cloudinit import safeyaml
-@@ -162,4 +165,24 @@ class TestMain(FilesystemMockingTestCase
- for log in expected_logs:
- self.assertIn(log, self.stderr.getvalue())
-
-+
-+class TestShouldBringUpInterfaces:
-+ @pytest.mark.parametrize('cfg_disable,args_local,expected', [
-+ (True, True, False),
-+ (True, False, False),
-+ (False, True, False),
-+ (False, False, True),
-+ ])
-+ def test_should_bring_up_interfaces(
-+ self, cfg_disable, args_local, expected
-+ ):
-+ init = mock.Mock()
-+ init.cfg = {'disable_network_activation': cfg_disable}
-+
-+ args = mock.Mock()
-+ args.local = args_local
-+
-+ result = main._should_bring_up_interfaces(init, args)
-+ assert result == expected
-+
- # vi: ts=4 expandtab
---- a/cloudinit/distros/__init__.py
-+++ b/cloudinit/distros/__init__.py
-@@ -227,8 +227,11 @@ class Distro(persistence.CloudInitPickle
-
- # Now try to bring them up
- if bring_up:
-+ LOG.debug('Bringing up newly configured network interfaces')
- network_activator = activators.select_activator()
- network_activator.bring_up_all_interfaces(network_state)
-+ else:
-+ LOG.debug("Not bringing up newly configured network interfaces")
- return False
-
- def apply_network_config_names(self, netconfig):
---- a/doc/rtd/topics/network-config.rst
-+++ b/doc/rtd/topics/network-config.rst
-@@ -75,6 +75,17 @@ If `Cloud-init`_ 's networking config ha
- no other network information is found, then it will proceed
- to generate a fallback networking configuration.
-
-+Disabling Network Activation
-+----------------------------
-+
-+Some datasources may not be initialized until after network has been brought
-+up. In this case, cloud-init will attempt to bring up the interfaces specified
-+by the datasource metadata.
-+
-+This behavior can be disabled in the cloud-init configuration dictionary,
-+merged from ``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
-+
-+ disable_network_activation: true
-
- Fallback Network Configuration
- ==============================
---- /dev/null
-+++ b/tests/integration_tests/datasources/test_network_dependency.py
-@@ -0,0 +1,43 @@
-+import pytest
-+
-+from tests.integration_tests.clouds import IntegrationCloud
-+from tests.integration_tests.conftest import get_validated_source
-+
-+
-+def _setup_custom_image(session_cloud: IntegrationCloud):
-+ """Like `setup_image` in conftest.py, but with customized content."""
-+ source = get_validated_source(session_cloud)
-+ if not source.installs_new_version():
-+ return
-+ client = session_cloud.launch()
-+
-+ # Insert our "disable_network_activation" file here
-+ client.write_to_file(
-+ '/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg',
-+ 'disable_network_activation: true\n',
-+ )
-+
-+ client.install_new_cloud_init(source)
-+ # Even if we're keeping instances, we don't want to keep this
-+ # one around as it was just for image creation
-+ client.destroy()
-+
-+
-+# This test should be able to work on any cloud whose datasource specifies
-+# a NETWORK dependency
-+@pytest.mark.gce
-+@pytest.mark.ubuntu # Because netplan
-+def test_network_activation_disabled(session_cloud: IntegrationCloud):
-+ """Test that the network is not activated during init mode."""
-+ _setup_custom_image(session_cloud)
-+ with session_cloud.launch() as client:
-+ result = client.execute('systemctl status google-guest-agent.service')
-+ if not result.ok:
-+ raise AssertionError('google-guest-agent is not active:\n%s',
-+ result.stdout)
-+ log = client.read_from_file('/var/log/cloud-init.log')
-+
-+ assert "Running command ['netplan', 'apply']" not in log
-+
-+ assert 'Not bringing up newly configured network interfaces' in log
-+ assert 'Bringing up newly configured network interfaces' not in log
diff --git a/debian/patches/cpick-dc227869-Set-Azure-to-apply-networking-config-every-BOOT-1023 b/debian/patches/cpick-dc227869-Set-Azure-to-apply-networking-config-every-BOOT-1023
deleted file mode 100644
index 739f761c..00000000
--- a/debian/patches/cpick-dc227869-Set-Azure-to-apply-networking-config-every-BOOT-1023
+++ /dev/null
@@ -1,80 +0,0 @@
-From dc22786980a05129c5971e68ae37b1a9f76f882d Mon Sep 17 00:00:00 2001
-From: James Falcon <therealfalcon@gmail.com>
-Date: Fri, 17 Sep 2021 16:25:22 -0500
-Subject: [PATCH] Set Azure to apply networking config every BOOT (#1023)
-
-In #1006, we set Azure to apply networking config every
-BOOT_NEW_INSTANCE because the BOOT_LEGACY option was causing problems
-applying networking the second time per boot. However,
-BOOT_NEW_INSTANCE is also wrong as Azure needs to apply networking
-once per boot, during init-local phase.
----
- cloudinit/sources/DataSourceAzure.py | 6 +++++-
- tests/integration_tests/modules/test_user_events.py | 10 ++++++----
- 2 files changed, 11 insertions(+), 5 deletions(-)
-
-Index: cloud-init/cloudinit/sources/DataSourceAzure.py
-===================================================================
---- cloud-init.orig/cloudinit/sources/DataSourceAzure.py
-+++ cloud-init/cloudinit/sources/DataSourceAzure.py
-@@ -22,7 +22,7 @@ import requests
- from cloudinit import dmi
- from cloudinit import log as logging
- from cloudinit import net
--from cloudinit.event import EventType
-+from cloudinit.event import EventScope, EventType
- from cloudinit.net import device_driver
- from cloudinit.net.dhcp import EphemeralDHCPv4
- from cloudinit import sources
-@@ -339,6 +339,10 @@ def temporary_hostname(temp_hostname, cf
- class DataSourceAzure(sources.DataSource):
-
- dsname = 'Azure'
-+ default_update_events = {EventScope.NETWORK: {
-+ EventType.BOOT_NEW_INSTANCE,
-+ EventType.BOOT,
-+ }}
- _negotiated = False
- _metadata_imds = sources.UNSET
- _ci_pkl_version = 1
-Index: cloud-init/tests/integration_tests/modules/test_user_events.py
-===================================================================
---- cloud-init.orig/tests/integration_tests/modules/test_user_events.py
-+++ cloud-init/tests/integration_tests/modules/test_user_events.py
-@@ -31,7 +31,6 @@ def _add_dummy_bridge_to_netplan(client:
- @pytest.mark.gce
- @pytest.mark.oci
- @pytest.mark.openstack
--@pytest.mark.not_xenial
- def test_boot_event_disabled_by_default(client: IntegrationInstance):
- log = client.read_from_file('/var/log/cloud-init.log')
- assert 'Applying network configuration' in log
-@@ -66,7 +65,7 @@ def _test_network_config_applied_on_rebo
- assert 'dummy0' not in client.execute('ls /sys/class/net')
-
- _add_dummy_bridge_to_netplan(client)
-- client.execute('rm /var/log/cloud-init.log')
-+ client.execute('echo "" > /var/log/cloud-init.log')
- client.restart()
- log = client.read_from_file('/var/log/cloud-init.log')
-
-@@ -81,6 +80,11 @@ def test_boot_event_enabled_by_default(c
- _test_network_config_applied_on_reboot(client)
-
-
-+@pytest.mark.azure
-+def test_boot_event_enabled_by_default(client: IntegrationInstance):
-+ _test_network_config_applied_on_reboot(client)
-+
-+
- USER_DATA = """\
- #cloud-config
- updates:
-@@ -89,7 +93,6 @@ updates:
- """
-
-
--@pytest.mark.not_xenial
- @pytest.mark.user_data(USER_DATA)
- def test_boot_event_enabled(client: IntegrationInstance):
- _test_network_config_applied_on_reboot(client)
diff --git a/debian/patches/cpick-e69a8874-Set-Azure-to-only-update-metadata-on-BOOT_NEW_INSTANCE b/debian/patches/cpick-e69a8874-Set-Azure-to-only-update-metadata-on-BOOT_NEW_INSTANCE
deleted file mode 100644
index c6ff7345..00000000
--- a/debian/patches/cpick-e69a8874-Set-Azure-to-only-update-metadata-on-BOOT_NEW_INSTANCE
+++ /dev/null
@@ -1,55 +0,0 @@
-From e69a88745e37061e0ab0a1e67ad11015cca610c1 Mon Sep 17 00:00:00 2001
-From: James Falcon <therealfalcon@gmail.com>
-Date: Fri, 3 Sep 2021 12:57:20 -0500
-Subject: [PATCH] Set Azure to only update metadata on BOOT_NEW_INSTANCE
- (#1006)
-
-In #834, we refactored the handling of events for fetching new metadata.
-Previously, in Azure's __init__, the BOOT event was added to the
-update_events, so it was assumed that Azure required the standard BOOT
-behavior, which is to apply metadata twice every boot: once during
-local-init, then again during standard init phase.
-https://github.com/canonical/cloud-init/blob/21.2/cloudinit/sources/DataSourceAzure.py#L356
-
-However, this line was effectively meaningless. After the metadata was
-fetched in local-init, it was then pickled out to disk. Because
-"update_events" was a class variable, the EventType.BOOT was not
-persisted into the pickle. When the pickle was then unpickled in the
-init phase, metadata did not get re-fetched because EventType.BOOT was
-not present, so Azure is effectively only BOOT_NEW_INSTANCE.
-
-Fetching metadata twice during boot causes some issue for
-pre-provisioning on Azure because updating metadata during
-re-provisioning will cause cloud-init to poll for reprovisiondata again
-in DataSourceAzure, which will infinitely return 404 (reprovisiondata
-is deleted from IMDS after the health signal was sent by cloud-init during
-init-local). This leaves cloud-init stuck in 'init'.
----
- cloudinit/sources/DataSourceAzure.py | 9 +--------
- 1 file changed, 1 insertion(+), 8 deletions(-)
-
---- a/cloudinit/sources/DataSourceAzure.py
-+++ b/cloudinit/sources/DataSourceAzure.py
-@@ -22,7 +22,7 @@ import requests
- from cloudinit import dmi
- from cloudinit import log as logging
- from cloudinit import net
--from cloudinit.event import EventScope, EventType
-+from cloudinit.event import EventType
- from cloudinit.net import device_driver
- from cloudinit.net.dhcp import EphemeralDHCPv4
- from cloudinit import sources
-@@ -339,13 +339,6 @@ def temporary_hostname(temp_hostname, cf
- class DataSourceAzure(sources.DataSource):
-
- dsname = 'Azure'
-- # Regenerate network config new_instance boot and every boot
-- default_update_events = {EventScope.NETWORK: {
-- EventType.BOOT_NEW_INSTANCE,
-- EventType.BOOT,
-- EventType.BOOT_LEGACY
-- }}
--
- _negotiated = False
- _metadata_imds = sources.UNSET
- _ci_pkl_version = 1
diff --git a/debian/patches/series b/debian/patches/series
deleted file mode 100644
index 2c952985..00000000
--- a/debian/patches/series
+++ /dev/null
@@ -1,5 +0,0 @@
-cpick-28e56d99-Azure-Retry-dhcp-on-timeouts-when-polling
-cpick-e69a8874-Set-Azure-to-only-update-metadata-on-BOOT_NEW_INSTANCE
-cpick-612e3908-Add-connectivity_url-to-Oracle-s-EphemeralDHCPv4-988
-cpick-dc227869-Set-Azure-to-apply-networking-config-every-BOOT-1023
-cpick-9c147e83-Allow-disabling-of-network-activation-SC-307-1048
diff --git a/debian/po/templates.pot b/debian/po/templates.pot
index fb0df2c0..ba0fb51b 100644
--- a/debian/po/templates.pot
+++ b/debian/po/templates.pot
@@ -32,6 +32,12 @@ msgstr ""
#. Type: multiselect
#. Choices
#: ../cloud-init.templates:1001
+msgid "LXD: Reads /dev/lxd/sock representation of instance data"
+msgstr ""
+
+#. Type: multiselect
+#. Choices
+#: ../cloud-init.templates:1001
msgid "OpenNebula: read from OpenNebula context disk"
msgstr ""
diff --git a/debian/upstream/metadata b/debian/upstream/metadata
index 46b4cd69..239532a9 100644
--- a/debian/upstream/metadata
+++ b/debian/upstream/metadata
@@ -1,5 +1,5 @@
Name: cloud-init
-Contact: Josh Powers <josh.powers@canonical.com>
+Contact: James Falcon <james.falcon@canonical.com>
Repository: https://github.com/canonical/cloud-init.git
Repository-Browse: https://github.com/canonical/cloud-init
Bug-Submit: https://bugs.launchpad.net/cloud-init
diff --git a/doc-requirements.txt b/doc-requirements.txt
index d5f921e3..b8d6ba90 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -3,3 +3,6 @@ m2r
sphinx<2
sphinx_rtd_theme
pyyaml
+
+# Indirect dependencies
+docutils<0.18
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index f4392326..778187b5 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -149,6 +149,7 @@ apt:
# security is optional, if not defined it is set to the same value as primary
security:
- uri: http://security.ubuntu.com/ubuntu
+ arches: [default]
# If search_dns is set for security the searched pattern is:
# <distro>-security-mirror
@@ -212,14 +213,14 @@ apt:
#
# The key of each source entry is the filename and will be prepended by
# /etc/apt/sources.list.d/ if it doesn't start with a '/'.
- # If it doesn't end with .list it will be appended so that apt picks up it's
+ # If it doesn't end with .list it will be appended so that apt picks up its
# configuration.
#
# Whenever there is no content to be written into such a file, the key is
# not used as filename - yet it can still be used as index for merging
# configuration.
#
- # The values inside the entries consost of the following optional entries:
+ # The values inside the entries consist of the following optional entries:
# 'source': a sources.list entry (some variable replacements apply)
# 'keyid': providing a key to import via shortid or fingerprint
# 'key': providing a raw PGP key
@@ -276,13 +277,14 @@ apt:
my-repo2.list:
# 2.4 replacement variables
#
- # sources can use $MIRROR, $PRIMARY, $SECURITY and $RELEASE replacement
- # variables.
+ # sources can use $MIRROR, $PRIMARY, $SECURITY, $RELEASE and $KEY_FILE
+ # replacement variables.
# They will be replaced with the default or specified mirrors and the
# running release.
# The entry below would be possibly turned into:
# source: deb http://archive.ubuntu.com/ubuntu xenial multiverse
- source: deb $MIRROR $RELEASE multiverse
+ source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse
+ keyid: F430BBA5
my-repo3.list:
# this would have the same end effect as 'ppa:curtin-dev/test-archive'
@@ -310,9 +312,19 @@ apt:
keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
keyserver: pgp.mit.edu
+ ignored5:
+ # 2.8 signed-by
+ #
+ # One can specify [signed-by=$KEY_FILE] in the source definition, which
+ # will cause the key to be installed in the directory /etc/cloud-init.gpg.d/
+ # and the $KEY_FILE replacement variable will be replaced with the path
+ # to the specified key. If $KEY_FILE is used, but no key is specified,
+ # apt update will (rightfully) fail due to an invalid value.
+ source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse
+ keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77
my-repo4.list:
- # 2.8 raw key
+ # 2.9 raw key
#
# The apt signing key can also be specified by providing a pgp public key
# block. Providing the PGP key this way is the most robust method for
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 1faecf75..30cd3f97 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -20,14 +20,18 @@ users:
groups: users
selinux_user: staff_u
expiredate: '2032-09-01'
- ssh_import_id: foobar
+ ssh_import_id:
+ - lp:falcojr
+ - gh:TheRealFalcon
lock_passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- name: barfoo
gecos: Bar B. Foo
sudo: ALL=(ALL) NOPASSWD:ALL
groups: users, admin
- ssh_import_id: None
+ ssh_import_id:
+ - lp:falcojr
+ - gh:TheRealFalcon
lock_passwd: true
ssh_authorized_keys:
- <ssh pub key 1>
diff --git a/doc/man/cloud-id.1 b/doc/man/cloud-id.1
index 98ce130c..59fecdd9 100644
--- a/doc/man/cloud-id.1
+++ b/doc/man/cloud-id.1
@@ -25,7 +25,7 @@ Path to instance-data.json file. Default is
/run/cloud-init/instance-data.json
.SH COPYRIGHT
-Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0
+Copyright (C) 2021 Canonical Ltd. License GPL-3 or Apache-2.0
.SH SEE ALSO
Full documentation at: <https://cloudinit.readthedocs.io>
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 67d6a9e3..69cf2068 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -78,6 +78,5 @@ Having trouble? We would like to help!
topics/docs.rst
topics/testing.rst
topics/integration_tests.rst
- topics/cloud_tests.rst
.. vi: textwidth=79
diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst
index 4b60776b..ee3828de 100644
--- a/doc/rtd/topics/bugs.rst
+++ b/doc/rtd/topics/bugs.rst
@@ -17,7 +17,7 @@ To aid in debugging, please collect the necessary logs. To do so, run the
.. code-block:: shell-session
- $ cloud-init collect-logs
+ $ sudo cloud-init collect-logs
Wrote /home/ubuntu/cloud-init.tar.gz
If your version of cloud-init does not have the `collect-logs` subcommand,
@@ -25,7 +25,7 @@ then please manually collect the base log files by doing the following:
.. code-block:: shell-session
- $ dmesg > dmesg.txt
+ $ sudo dmesg > dmesg.txt
$ sudo journalctl -o short-precise > journal.txt
$ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \
/var/log/cloud-init.log /var/log/cloud-init-output.log
diff --git a/doc/rtd/topics/cloud_tests.rst b/doc/rtd/topics/cloud_tests.rst
deleted file mode 100644
index 0fbb1301..00000000
--- a/doc/rtd/topics/cloud_tests.rst
+++ /dev/null
@@ -1,764 +0,0 @@
-************************
-Cloud tests (Deprecated)
-************************
-
-Cloud tests are no longer maintained. For writing integration
-tests, see the :ref:`integration_tests` page.
-
-Overview
-========
-
-This page describes the execution, development, and architecture of the
-cloud-init integration tests:
-
-* Execution explains the options available and running of tests
-* Development shows how to write test cases
-* Architecture explains the internal processes
-
-Execution
-=========
-
-Overview
---------
-
-To avoid the need for dependencies and to ease setup and
-configuration, users can run the integration tests via tox:
-
-.. code-block:: shell-session
-
- $ git clone https://github.com/canonical/cloud-init
- $ cd cloud-init
- $ tox -e citest -- -h
-
-Everything after the double dash will be passed to the integration tests.
-Executing tests has several options:
-
-* ``run`` an alias to run both ``collect`` and ``verify``. The ``tree_run``
- command does the same thing, except it uses a deb built from the current
- working tree.
-
-* ``collect`` deploys on the specified platform and distro, patches with the
- requested deb or rpm, and finally collects output of the arbitrary
- commands. Similarly, ``tree_collect`` will collect output using a deb
- built from the current working tree.
-
-* ``verify`` given a directory of test data, run the Python unit tests on
- it to generate results.
-
-* ``bddeb`` will build a deb of the current working tree.
-
-Run
----
-
-The first example will provide a complete end-to-end run of data
-collection and verification. There are additional examples below
-explaining how to run one or the other independently.
-
-.. code-block:: shell-session
-
- $ git clone https://github.com/canonical/cloud-init
- $ cd cloud-init
- $ tox -e citest -- run --verbose \
- --os-name stretch --os-name xenial \
- --deb cloud-init_0.7.8~my_patch_all.deb \
- --preserve-data --data-dir ~/collection \
- --preserve-instance
-
-The above command will do the following:
-
-* ``run`` both collects output and runs tests on the output
-
-* ``--verbose`` verbose output
-
-* ``--os-name stretch`` on the Debian Stretch release
-
-* ``--os-name xenial`` on the Ubuntu Xenial release
-
-* ``--deb cloud-init_0.7.8~patch_all.deb`` use this deb as the version of
- cloud-init to run with
-
-* ``--preserve-data`` always preserve collected data, do not remove data
- after successful test run
-
-* ``--preserve-instance`` do not destroy the instance after test to allow
- for debugging the stopped instance during integration test development. By
- default, test instances are destroyed after the test completes.
-
-* ``--data-dir ~/collection`` write collected data into `~/collection`,
- rather than using a temporary directory
-
-For a more detailed explanation of each option see below.
-
-.. note::
- By default, data collected by the run command will be written into a
- temporary directory and deleted after a successful. If you would
- like to preserve this data, please use the option ``--preserve-data``.
-
-Collect
--------
-
-If developing tests it may be necessary to see if cloud-config works as
-expected and the correct files are pulled down. In this case only a
-collect can be run:
-
-.. code-block:: shell-session
-
- $ tox -e citest -- collect -n xenial --data-dir /tmp/collection
-
-The above command will run the collection tests on xenial and place
-all results into `/tmp/collection`.
-
-Verify
-------
-
-When developing tests it is much easier to simply rerun the verify scripts
-without the more lengthy collect process. This can be done by running:
-
-.. code-block:: shell-session
-
- $ tox -e citest -- verify --data-dir /tmp/collection
-
-The above command will run the verify scripts on the data discovered in
-`/tmp/collection`.
-
-TreeRun and TreeCollect
------------------------
-
-If working on a cloud-init feature or resolving a bug, it may be useful to
-run the current copy of cloud-init in the integration testing environment.
-The integration testing suite can automatically build a deb based on the
-current working tree of cloud-init and run the test suite using this deb.
-
-The ``tree_run`` and ``tree_collect`` commands take the same arguments as
-the ``run`` and ``collect`` commands. These commands will build a deb and
-write it into a temporary file, then start the test suite and pass that deb
-in. To build a deb only, and not run the test suite, the ``bddeb`` command
-can be used.
-
-Note that code in the cloud-init working tree that has not been committed
-when the cloud-init deb is built will still be included. To build a
-cloud-init deb from or use the ``tree_run`` command using a copy of
-cloud-init located in a different directory, use the option ``--cloud-init
-/path/to/cloud-init``.
-
-.. code-block:: shell-session
-
- $ tox -e citest -- tree_run --verbose \
- --os-name xenial --os-name stretch \
- --test modules/final_message --test modules/write_files \
- --result /tmp/result.yaml
-
-Bddeb
------
-
-The ``bddeb`` command can be used to generate a deb file. This is used by the
-tree_run and tree_collect commands to build a deb of the current working tree
-using the packaging template contained in the ``packages/debian/`` directory.
-It can also be used to generate a deb for use in other situations and avoid
-needing to have all the build and test dependencies installed locally.
-
-* ``--bddeb-args``: arguments to pass through to bddeb
-* ``--build-os``: distribution to use as build system (default is xenial)
-* ``--build-platform``: platform to use for build system (default is lxd)
-* ``--cloud-init``: path to base of cloud-init tree (default is '.')
-* ``--deb``: path to write output deb to (default is '.')
-* ``--packaging-branch``: import the ``debian/`` packaging directory
- from the specified branch (default: ``ubuntu/devel``) instead of using
- the packaging template.
-
-Setup Image
------------
-
-By default an image that is used will remain unmodified, but certain
-scenarios may require image modification. For example, many images may use
-a much older cloud-init. As a result tests looking at newer functionality
-will fail because a newer version of cloud-init may be required. The
-following options can be used for further customization:
-
-* ``--deb``: install the specified deb into the image
-* ``--rpm``: install the specified rpm into the image
-* ``--repo``: enable a repository and upgrade cloud-init afterwards
-* ``--ppa``: enable a ppa and upgrade cloud-init afterwards
-* ``--upgrade``: upgrade cloud-init from repos
-* ``--upgrade-full``: run a full system upgrade
-* ``--script``: execute a script in the image. This can perform any setup
- required that is not covered by the other options
-
-Test Case Development
-=====================
-
-Overview
---------
-
-As a test writer you need to develop a test configuration and a
-verification file:
-
- * The test configuration specifies a specific cloud-config to be used by
- cloud-init and a list of arbitrary commands to capture the output of
- (e.g. my_test.yaml)
-
- * The verification file runs tests on the collected output to determine
- the result of the test (e.g. my_test.py)
-
-The names must match; however, the extensions will of course differ:
-yaml vs py.
-
-Configuration
--------------
-
-The test configuration is a YAML file such as *ntp_server.yaml* below:
-
-.. code-block:: yaml
-
- #
- # Empty NTP config to setup using defaults
- #
- # NOTE: this should not require apt feature, use 'which' rather than 'dpkg -l'
- # NOTE: this should not require no_ntpdate feature, use 'which' to check for
- # installation rather than 'dpkg -l', as 'grep ntp' matches 'ntpdate'
- # NOTE: the verifier should check for any ntp server not 'ubuntu.pool.ntp.org'
- cloud_config: |
- #cloud-config
- ntp:
- servers:
- - pool.ntp.org
- required_features:
- - apt
- - no_ntpdate
- - ubuntu_ntp
- collect_scripts:
- ntp_installed_servers: |
- #!/bin/bash
- dpkg -l | grep ntp | wc -l
- ntp_conf_dist_servers: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_servers: |
- #!/bin/bash
- cat /etc/ntp.conf | grep '^server'
-
-There are several keys in the YAML file, one required and some optional:
-
-1. The required key is ``cloud_config``. This should be a string of valid
- YAML that is exactly what would normally be placed in a cloud-config
- file, including the cloud-config header. This essentially sets up the
- scenario under test.
-
-2. One optional key is ``collect_scripts``. This key has one or more
- sub-keys containing strings of arbitrary commands to execute (e.g.
- ``cat /var/log/cloud-config-output.log``). In the example above the
- output of dpkg is captured, grepped for ntp, and the number of lines
- reported. The name of the sub-key is important. The sub-key is used by
- the verification script to recall the output of the commands run.
-
-3. The optional ``enabled`` key enables or disables the test case. By
- default the test case will be enabled.
-
-4. The optional ``required_features`` key may be used to specify a list
- of features flags that an image must have to be able to run the test
- case. For example, if a test case relies on an image supporting apt,
- then the config for the test case should include ``required_features:
- [ apt ]``.
-
-
-Default Collect Scripts
------------------------
-
-By default the following files will be collected for every test. There is
-no need to specify these items:
-
-* ``/var/log/cloud-init.log``
-* ``/var/log/cloud-init-output.log``
-* ``/run/cloud-init/.instance-id``
-* ``/run/cloud-init/result.json``
-* ``/run/cloud-init/status.json``
-* ``dpkg-query -W -f='${Version}' cloud-init``
-
-Verification
-------------
-
-The verification script is a Python file with unit tests like the one
-below, `ntp_server.py`:
-
-.. code-block:: python
-
- # This file is part of cloud-init. See LICENSE file for license information.
-
- """cloud-init Integration Test Verify Script"""
- from tests.cloud_tests.testcases import base
-
-
- class TestNtp(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_empty')
- self.assertEqual(1, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file has one entry"""
- out = self.get_data_file('ntp_conf_dist_empty')
- self.assertEqual(1, int(out))
-
- def test_ntp_entries(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_empty')
- self.assertIn('pool 0.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 1.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 2.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 3.ubuntu.pool.ntp.org iburst', out)
-
- # vi: ts=4 expandtab
-
-
-Here is a breakdown of the unit test file:
-
-* The import statement allows access to the output files.
-
-* The class can be named anything, but must import the
- ``base.CloudTestCase``, either directly or via another test class.
-
-* There can be any number of functions with any name; however, only
- functions starting with ``test_*`` will be executed.
-
-* There can be any number of classes in a test module; however, only
- classes inheriting from ``base.CloudTestCase`` will be loaded.
-
-* Output from the commands can be accessed via
- ``self.get_data_file('key')`` where key is the sub-key of
- ``collect_scripts`` above.
-
-* The cloud config that the test ran with can be accessed via
-  ``self.cloud_config``, and any entry from the cloud config can be
-  accessed via ``self.get_config_entry('key')`` (see the sketch below).
-
-* See the base ``CloudTestCase`` for additional helper functions.
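-
-As a sketch of those helpers, a hypothetical additional method on the
-class above might check the test's own cloud config:
-
-.. code-block:: python
-
-    def test_ntp_in_config(self):
-        """Hypothetical check against the cloud config the test ran with."""
-        # the full cloud config used for the test
-        self.assertIn('ntp', self.cloud_config)
-        # a single entry pulled out by key
-        self.assertIsNotNone(self.get_config_entry('ntp'))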
-
-Layout
-------
-
-Integration tests are located under the `tests/cloud_tests` directory.
-Test configurations are placed under `configs` and the test verification
-scripts under `testcases`:
-
-.. code-block:: shell-session
-
- cloud-init$ tree -d tests/cloud_tests/
- tests/cloud_tests/
- ├── configs
- │   ├── bugs
- │   ├── examples
- │   ├── main
- │   └── modules
- └── testcases
- ├── bugs
- ├── examples
- ├── main
- └── modules
-
-The sub-folders bugs, examples, main, and modules help organize the
-tests. See the README.md in each directory for more detail.
-
-Test Creation Helper
---------------------
-
-The integration testing suite has a built-in helper to aid in test
-development. Help can be invoked via ``tox -e citest -- create --help``. It
-can create a template test case config file with user data passed in from
-the command line, as well as a template test case verifier module.
-
-The following would create a test case named ``example`` under the
-``modules`` category with the given description, and cloud config data read
-in from ``/tmp/user_data``:
-
-.. code-block:: shell-session
-
- $ tox -e citest -- create modules/example \
- -d "a simple example test case" -c "$(< /tmp/user_data)"
-
-
-Development Checklist
----------------------
-
-* Configuration File
- * Named 'your_test.yaml'
- * Contains at least a valid cloud-config
- * Optionally, commands to capture additional output
- * Valid YAML
- * Placed in the appropriate sub-folder in the configs directory
- * Any image features required for the test are specified
-* Verification File
- * Named 'your_test.py'
- * Valid unit tests validating output collected
- * Passes pylint & pep8 checks
- * Placed in the appropriate sub-folder in the test cases directory
-* Tested by running the test:
-
- .. code-block:: shell-session
-
-      $ tox -e citest -- run --verbose \
- --os-name <release target> \
- --test modules/your_test.yaml \
- [--deb <build of cloud-init>]
-
-
-Platforms
-=========
-
-EC2
----
-To run on the EC2 platform, the user must have an AWS credentials
-configuration file specifying their access keys and a default region.
-These configuration files are the standard ones that the AWS CLI and
-other AWS tools use to interact with AWS, and they are normally
-generated by running ``aws configure``:
-
-.. code-block:: shell-session
-
- $ cat $HOME/.aws/credentials
- [default]
- aws_access_key_id = <KEY HERE>
- aws_secret_access_key = <KEY HERE>
-
-.. code-block:: shell-session
-
- $ cat $HOME/.aws/config
- [default]
- region = us-west-2
-
-
-Azure Cloud
------------
-
-To run on the Azure Cloud platform, users log in with a Service Principal
-and export a credentials file. A default region is used, and it can be
-changed in ``tests/cloud_tests/platforms.yaml``. Service Principal
-credentials are the standard authentication method for the Azure SDK to
-interact with Azure services.
-
-Create a Service Principal account, or log in:
-
-.. code-block:: shell-session
-
- $ az ad sp create-for-rbac --name "APP_ID" --password "STRONG-SECRET-PASSWORD"
-
-.. code-block:: shell-session
-
- $ az login --service-principal --username "APP_ID" --password "STRONG-SECRET-PASSWORD"
-
-Export the credentials:
-
-.. code-block:: shell-session
-
- $ az ad sp create-for-rbac --sdk-auth > $HOME/.azure/credentials.json
-
-.. code-block:: json
-
- {
- "clientId": "<Service principal ID>",
- "clientSecret": "<Service principal secret/password>",
- "subscriptionId": "<Subscription associated with the service principal>",
- "tenantId": "<The service principal's tenant>",
- "activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
- "resourceManagerEndpointUrl": "https://management.azure.com/",
- "activeDirectoryGraphResourceId": "https://graph.windows.net/",
- "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
- "galleryEndpointUrl": "https://gallery.azure.com/",
- "managementEndpointUrl": "https://management.core.windows.net/"
- }
-
-Set the region in ``platforms.yaml``:
-
-.. code-block:: yaml
-
- azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
-
-
-Architecture
-============
-
-The following section outlines the high-level architecture of the
-integration process.
-
-Overview
---------
-The process flow during a complete end-to-end LXD-backed test is as follows:
-
-1. Configuration
- * The back end and specific distro releases are verified as supported
- * The test or tests that need to be run are determined either by
- directory or by individual yaml
-
-2. Image Creation
-   * Acquire the requested LXD image
- * Install the specified cloud-init package
- * Clean the image so that it does not appear to have been booted
- * A snapshot of the image is created and reused by all tests
-
-3. Collection
- * For each test, the cloud-config is injected into a copy of the
- snapshot and booted
- * The framework waits for ``/var/lib/cloud/instance/boot-finished``
- (up to 120 seconds)
-   * All default commands are run and output collected
- * Any commands the user specified are executed and output collected
-
-4. Verification
-   * The default command output is checked for any failures, errors, and
-     warnings to validate that the basic functionality of cloud-init
-     completed successfully
-   * The user-generated unit tests are then run, validating against the
-     collected output
-
-5. Results
-   * If any failures were detected, the test suite returns a failure
-   * Results can be dumped in yaml format to a specified file using the
-     ``-r <result_file_name>.yaml`` option, as in the example below
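-
-A run that stores results in a file might be invoked as follows (the
-``xenial`` target and result file name are illustrative):
-
-.. code-block:: shell-session
-
-    $ tox -e citest -- run --os-name xenial -r results.yaml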
-
-Configuring the Test Suite
---------------------------
-
-Most of the behavior of the test suite is configurable through several yaml
-files. These control the behavior of the test suite's platforms, images, and
-tests. The main config files for platforms, images and test cases are
-``platforms.yaml``, ``releases.yaml`` and ``testcases.yaml``.
-
-Config handling
-^^^^^^^^^^^^^^^
-
-All configurable parts of the test suite use a defaults + overrides system
-for managing config entries. All base config items are dictionaries.
-
-Merging is done on a key-by-key basis, with all keys in the default and
-override represented in the final result. If a key exists both in
-the defaults and the overrides, then the behavior depends on the type of data
-the key refers to. If it is atomic data or a list, then the overrides will
-replace the default. If the data is a dictionary then the value will be the
-result of merging that dictionary from the default config and that
-dictionary from the overrides.
-
-Merging is done using the function
-``tests.cloud_tests.config.merge_config``, which can be examined for more
-detail on config merging behavior.
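-
-In essence, the merge is a shallow update followed by a recursive merge of
-any dict-valued keys, equivalent to this sketch:
-
-.. code-block:: python
-
-    def merge_config(base, override):
-        """Merge an override dict into a base dict, recursing into dicts."""
-        res = base.copy()
-        res.update(override)
-        # dict values are merged key-by-key rather than replaced wholesale
-        res.update({k: merge_config(base.get(k, {}), v)
-                    for k, v in override.items() if isinstance(v, dict)})
-        return res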
-
-The following demonstrates merge behavior:
-
-.. code-block:: yaml
-
- defaults:
- list_item:
- - list_entry_1
- - list_entry_2
- int_item_1: 123
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: 2
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: b
-
- overrides:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- dict_item:
- subkey_2: false
- subkey_dict:
- subsubkey_2: 'new value'
-
- result:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: false
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: 'new value'
-
-
-Image Config
-------------
-
-Image configuration is handled in ``releases.yaml``. The image configuration
-controls how platforms locate and acquire images, how the platforms should
-interact with the images, how platforms should detect when an image has
-fully booted, any options that are required to set the image up, and
-features that the image supports.
-
-Since settings for locating an image and interacting with it differ from
-platform to platform, there are 4 levels of settings available for images on
-top of the default image settings. The structure of the image config file
-is:
-
-.. code-block:: yaml
-
- default_release_config:
- default:
- ...
- <platform>:
- ...
- <platform>:
- ...
-
- releases:
- <release name>:
- <default>:
- ...
- <platform>:
- ...
- <platform>:
- ...
-
-
-The base config is created from the overall defaults and the overrides for
-the platform. The overrides are created from the default config for the
-image and the platform-specific overrides for the image.
-
-System Boot
-^^^^^^^^^^^
-
-The test suite must be able to test if a system has fully booted and if
-cloud-init has finished running, so that running collect scripts does not
-race against the target image booting. This is done using the
-``system_ready_script`` and ``cloud_init_ready_script`` image config keys.
-
-Each of these keys accepts a small bash test statement as a string that must
-return 0 or 1. Since this test statement will be added into a larger bash
-statement it must be a single statement using the ``[`` test syntax.
-
-The default image config provides a system ready script that works for any
-systemd-based image. If the image is not systemd-based, then a different
-test statement must be provided. The default config also provides a test
-for whether or not cloud-init has finished, which checks for the file
-``/run/cloud-init/result.json``. This should be sufficient for most
-systems, as writing this file is one of the last things cloud-init does.
-
-The setting ``boot_timeout`` controls how long, in seconds, the platform
-should wait for an image to boot. If the system ready script has not
-indicated that the system is fully booted within this time, an error will
-be raised.
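-
-A sketch of these keys for a systemd-based image (the exact statements in
-the tree's ``releases.yaml`` may differ):
-
-.. code-block:: yaml
-
-    system_ready_script: |
-      [ $(systemctl is-system-running) = 'running' -o \
-        $(systemctl is-system-running) = 'degraded' ]
-    cloud_init_ready_script: |
-      [ -f '/run/cloud-init/result.json' ]
-    boot_timeout: 120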
-
-Feature Flags
-^^^^^^^^^^^^^
-
-Not all test cases can work on all images, because some images lack
-features that a test case requires. If a test case requires features
-in an image that are not likely to be present across all distros and
-platforms that the test suite supports, then the test can be skipped
-everywhere it is not supported.
-
-Feature flags are names for features that are supported on some images,
-but not all, and that may be required by test cases. Configuration for
-feature flags is provided in ``releases.yaml`` under the ``features`` top
-level key. The features config includes a list of all currently defined
-feature flags, their meanings, and a list of feature groups.
-
-Feature groups are groups of features that many images have in common. For
-example, the ``Ubuntu_specific`` feature group includes features that
-should be present across most Ubuntu releases, but may or may not be for
-other distros. Feature groups are specified for an image as a list under
-the key ``feature_groups``.
-
-An image's feature flags are derived from the feature groups that the
-image has and any feature overrides provided. Feature overrides can be
-specified under the ``features`` key, which accepts a dictionary of
-``{<feature_name>: true/false}`` mappings. If a feature is omitted from an
-image's feature flags or set to false in the overrides, then the test suite
-will skip any tests that require that feature when using that image.
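-
-A hypothetical release entry combining feature groups and overrides (all
-names other than ``Ubuntu_specific`` are illustrative):
-
-.. code-block:: yaml
-
-    releases:
-      xenial:
-        default:
-          feature_groups:
-            - Ubuntu_specific
-          features:
-            # disable one flag regardless of group membership
-            snap: false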
-
-Feature flags may be overridden at run time using the ``--feature-override``
-command line argument. It accepts a feature flag and value to set in the
-format ``<feature name>=true/false``. Multiple ``--feature-override``
-flags can be used, and will all be applied to all feature flags for images
-used during a test.
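-
-For example, to override two flags from the command line for a run (the
-flag names are illustrative):
-
-.. code-block:: shell-session
-
-    $ tox -e citest -- run --os-name xenial \
-        --feature-override apt=true --feature-override snap=false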
-
-Setup Overrides
-^^^^^^^^^^^^^^^
-
-If an image requires certain options to be used during image setup, it may
-specify overrides for the command line arguments passed into image setup.
-These may be specified as a dictionary under the ``setup_overrides``
-key. When an image is set up, the arguments that control how it is set up
-will be the arguments from the command line, with any entries in
-``setup_overrides`` used to override these arguments.
-
-For example, images that do not come with cloud-init already installed
-should have ``setup_overrides: {upgrade: true}`` specified so that in the
-event that no additional setup options are given, cloud-init will be
-installed from the image's repos before running tests. Note that if other
-options such as ``--deb`` are passed in on the command line, these will
-still work as expected, since apt's policy for cloud-init would prefer the
-locally installed deb over an older version from the repos.
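-
-As a sketch, such an image entry would carry:
-
-.. code-block:: yaml
-
-    setup_overrides:
-      upgrade: true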
-
-Platform Specific Options
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-There are many platform specific options in image configuration that allow
-platforms to locate images and that control additional setup that the
-platform may have to do to make the image usable. For information on how
-these work, please consult the documentation for that platform in the
-integration testing suite and the ``releases.yaml`` file for examples.
-
-Error Handling
---------------
-
-The test suite attempts to run as many tests as possible, even if some of
-them fail, so that automated runs collect as much data as possible. In the
-event that something goes wrong while setting up for or running a test,
-the test suite will attempt to continue running any tests which have not
-been affected by the error.
-
-For example, if the test suite was told to run tests on one platform for two
-releases and an error occurred setting up the first image, all tests for
-that image would be skipped, and the test suite would continue to set up
-the second image and run tests on it. Or, if the system does not start
-properly for one of the many test cases to be run on that image, that test
-case will be skipped and the next one will be run.
-
-Note that if any errors occur, the test suite will record the failure and
-where it occurred in the result data and write it out to the specified
-result file.
-
-Results
--------
-
-The test suite generates result data that includes how long each stage of
-the test suite took and which parts were and were not successful. This data
-is dumped to the log after the collect and verify stages, and may also be
-written out in yaml format to a file. If part of the setup failed, the
-traceback for the failure and the error message will be included in the
-result file. If a test verifier finds a problem with the collected data
-from a test run, the class, test function and test will be recorded in the
-result data.
-
-Exit Codes
-^^^^^^^^^^
-
-The test suite counts how many errors occur throughout a run. The exit code
-after a run is the number of errors that occurred. If the exit code is
-non-zero then something is wrong either with the test suite, the
-configuration for an image, a test case, or cloud-init itself.
-
-Note that the exit code does not always directly correspond to the number
-of failed test cases, since in some cases, a single error during image setup
-can mean that several test cases are not run. If ``run`` is used, then the
-exit code will be the sum of the number of errors in the collect and verify
-stages.
-
-Data Dir
-^^^^^^^^
-
-When using ``run``, the collected data is written into a temporary directory. In
-the event that all tests pass, this directory is deleted, but if a test
-fails or an error occurs, this data will be left in place, and a message
-will be written to the log giving the location of the data.
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index f5aee1c2..0ebc0f32 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -39,6 +39,7 @@ The following is a list of documents for each supported datasource:
datasources/exoscale.rst
datasources/fallback.rst
datasources/gce.rst
+ datasources/lxd.rst
datasources/maas.rst
datasources/nocloud.rst
datasources/opennebula.rst
diff --git a/doc/rtd/topics/datasources/gce.rst b/doc/rtd/topics/datasources/gce.rst
index 8406695c..f3590282 100644
--- a/doc/rtd/topics/datasources/gce.rst
+++ b/doc/rtd/topics/datasources/gce.rst
@@ -15,6 +15,28 @@ to provide ``public-keys``.
``user-data`` and ``user-data-encoding`` can be provided to cloud-init by
setting those custom metadata keys for an *instance*.
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * **retries**: The number of retries that should be done for an HTTP request.
+   This value is used only after metadata_url is selected. (default: 5)
+ * **sec_between_retries**: The amount of wait time between retries when
+   crawling the metadata service. (default: 1)
+
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+ datasource:
+ GCE:
+ retries: 5
+ sec_between_retries: 1
+
.. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata#querying
.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/lxd.rst b/doc/rtd/topics/datasources/lxd.rst
new file mode 100644
index 00000000..3991a4dd
--- /dev/null
+++ b/doc/rtd/topics/datasources/lxd.rst
@@ -0,0 +1,65 @@
+.. _datasource_lxd:
+
+LXD
+===
+
+The data source ``LXD`` allows the user to provide custom user-data,
+vendor-data, meta-data and network-config to the instance without running
+a network service (or even without having a network at all). This datasource
+performs HTTP GETs against the `LXD socket device`_, which is provided to each
+running LXD container and VM as ``/dev/lxd/sock`` and represents all
+instance-metadata as versioned HTTP routes such as:
+
+ - 1.0/meta-data
+ - 1.0/config/user.meta-data
+ - 1.0/config/user.vendor-data
+ - 1.0/config/user.user-data
+ - 1.0/config/user.<any-custom-key>
+
+The LXD socket device ``/dev/lxd/sock`` is only present on containers and VMs
+when the instance configuration has ``security.devlxd=true`` (default).
+Disabling the ``security.devlxd`` configuration setting at initial launch
+will ensure that cloud-init uses the :ref:`datasource_nocloud` datasource.
+Disabling ``security.devlxd`` over the life of the container will result in
+warnings from cloud-init, and cloud-init will keep the originally detected
+LXD datasource.
+
+The LXD datasource provides cloud-init the opportunity to react to meta-data,
+vendor-data, user-data and network-config changes and render the updated
+configuration across a system reboot.
+
+One can manipulate what meta-data, vendor-data or user-data is provided to
+the launched container using LXD profiles or
+``lxc launch ... -c <key>="<value>"`` at initial container launch, with one
+of the following keys:
+
+ - user.meta-data: YAML metadata which will be appended to base meta-data
+ - user.vendor-data: YAML which overrides any meta-data values
+ - user.network-config: YAML representing either :ref:`network_config_v1` or
+ :ref:`network_config_v2` format
+ - user.user-data: YAML which takes preference and overrides both meta-data
+ and vendor-data values
+ - user.any-key: Custom user configuration key and value pairs can be passed to
+ cloud-init. Those keys/values will be present in instance-data which can be
+ used by both `#template: jinja` #cloud-config templates and
+ the `cloud-init query` command.
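+
+For example, a launch providing custom user-data and a custom key might
+look like this (the instance name, file and key are illustrative):
+
+.. code:: shell-session
+
+   $ lxc launch ubuntu:focal blue \
+       -c user.user-data="$(cat userdata.yaml)" \
+       -c user.my-custom-key="some value"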
+
+
+By default, network configuration from this datasource will be:
+
+.. code:: yaml
+
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: dhcp
+ control: auto
+
+This datasource is intended to replace the :ref:`datasource_nocloud`
+datasource for LXD instances, with more direct support for LXD APIs instead
+of static NoCloud seed files.
+
+.. _LXD socket device: https://linuxcontainers.org/lxd/docs/master/dev-lxd
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/topics/datasources/opennebula.rst
index 350a3e93..9daa0462 100644
--- a/doc/rtd/topics/datasources/opennebula.rst
+++ b/doc/rtd/topics/datasources/opennebula.rst
@@ -69,13 +69,21 @@ Datasource mode configuration override. Values: local, net, disabled.
ETH<x>_NETWORK
ETH<x>_MASK
ETH<x>_GATEWAY
+ ETH<x>_GATEWAY6
ETH<x>_DOMAIN
ETH<x>_DNS
+ ETH<x>_SEARCH_DOMAIN
+ ETH<x>_MTU
+ ETH<x>_IP6
+ ETH<x>_IP6_ULA
+ ETH<x>_IP6_PREFIX_LENGTH
+ ETH<x>_IP6_GATEWAY
Static `network configuration`_.
::
+ SET_HOSTNAME
HOSTNAME
Instance hostname.
diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst
index 996eb61f..3ca9f10f 100644
--- a/doc/rtd/topics/datasources/vmware.rst
+++ b/doc/rtd/topics/datasources/vmware.rst
@@ -236,7 +236,6 @@ this datasource:
primary_group: akutz
sudo: ALL=(ALL) NOPASSWD:ALL
groups: sudo, wheel
- ssh_import_id: None
lock_passwd: true
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index e30fe0fe..3ca6b9e3 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -22,6 +22,7 @@ Modules
.. automodule:: cloudinit.config.cc_foo
.. automodule:: cloudinit.config.cc_growpart
.. automodule:: cloudinit.config.cc_grub_dpkg
+.. automodule:: cloudinit.config.cc_install_hotplug
.. automodule:: cloudinit.config.cc_keys_to_console
.. automodule:: cloudinit.config.cc_landscape
.. automodule:: cloudinit.config.cc_locale
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 8eb7a31b..494b687a 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -75,6 +75,17 @@ If `Cloud-init`_ 's networking config has not been disabled, and
no other network information is found, then it will proceed
to generate a fallback networking configuration.
+Disabling Network Activation
+----------------------------
+
+Some datasources may not be initialized until after the network has been
+brought up. In this case, cloud-init will attempt to bring up the interfaces
+specified by the datasource metadata.
+
+This behavior can be disabled in the cloud-init configuration dictionary,
+merged from ``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
+
+ disable_network_activation: true
Fallback Network Configuration
==============================
diff --git a/doc/sources/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml
index 4ef4ee63..e5f4e262 100644
--- a/doc/sources/ovf/example/ovf-env.xml
+++ b/doc/sources/ovf/example/ovf-env.xml
@@ -42,7 +42,7 @@
<Property oe:key="user-data" oe:value="IyEvYmluL3NoCmVjaG8gImhpIHdvcmxkIgo="/>
<Property oe:key="password" oe:value="passw0rd"/>
<!--
- network-config is optional, it can only be read from VMware guestinfo.ovfEnv
+ network-config is optional.
The value for network-config is to be base64 encoded.
It will be decoded, and then processed normally as network-config.
Set ovf-env.xml to VMware guestinfo.ovfEnv by below command:
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 20940328..53ea85f7 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@245ca0b97e71926fdb651147e42d6256b17f6778
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@4c4543b5b2c1b51b5875704d21c73eea1c9d3c50
pytest
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index b930709b..1491822b 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -119,12 +119,6 @@ version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f)
( cd "$RPM_BUILD_ROOT" &&
sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys )
-# patch hotplug /usr/libexec script path
-hotplug_file=$(cd "$RPM_BUILD_ROOT" && find . -name 10-cloud-init-hook-hotplug.rules -type f)
-
-( cd "$RPM_BUILD_ROOT" &&
- sed -i "s,/usr/lib,%{_libexecdir}," $hotplug_file )
-
%clean
rm -rf $RPM_BUILD_ROOT
@@ -178,10 +172,10 @@ fi
%files
/lib/udev/rules.d/66-azure-ephemeral.rules
-/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
%if "%{init_system}" == "systemd"
/usr/lib/systemd/system-generators/cloud-init-generator
+%{_sysconfdir}/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf
%{_unitdir}/cloud-*
%else
%attr(0755, root, root) %{_initddir}/cloud-config
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index 004b875f..da8107b4 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -126,6 +126,7 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f)
%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient
%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager
+%{_sysconfdir}/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf
# Python code is here...
%{python_sitelib}/*
diff --git a/setup.py b/setup.py
index 7fa03e63..58fddf0f 100755
--- a/setup.py
+++ b/setup.py
@@ -28,9 +28,11 @@ import subprocess
RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
VARIANT = None
+
def is_f(p):
return os.path.isfile(p)
+
def is_generator(p):
return '-generator' in p
@@ -38,6 +40,7 @@ def is_generator(p):
def pkg_config_read(library, var):
fallbacks = {
'systemd': {
+ 'systemdsystemconfdir': '/etc/systemd/system',
'systemdsystemunitdir': '/lib/systemd/system',
'systemdsystemgeneratordir': '/lib/systemd/system-generators',
}
@@ -111,6 +114,7 @@ def render_tmpl(template, mode=None):
# return path relative to setup.py
return os.path.join(os.path.basename(tmpd), bname)
+
# User can set the variant for template rendering
if '--distro' in sys.argv:
idx = sys.argv.index('--distro')
@@ -166,11 +170,11 @@ elif os.path.isfile('/etc/system-release-cpe'):
with open('/etc/system-release-cpe') as f:
cpe_data = f.read().rstrip().split(':')
- if cpe_data[1] == "\o":
- # URI formated CPE
+ if cpe_data[1] == "\o": # noqa: W605
+ # URI formatted CPE
inc = 0
else:
- # String formated CPE
+ # String formatted CPE
inc = 1
(cpe_vendor, cpe_product, cpe_version) = cpe_data[2+inc:5+inc]
if cpe_vendor == "amazon":
@@ -216,7 +220,8 @@ class InitsysInstallData(install):
if self.init_system and isinstance(self.init_system, str):
self.init_system = self.init_system.split(",")
- if len(self.init_system) == 0 and not platform.system().endswith('BSD'):
+ if (len(self.init_system) == 0 and
+ not platform.system().endswith('BSD')):
self.init_system = ['systemd']
bad = [f for f in self.init_system if f not in INITSYS_TYPES]
@@ -233,7 +238,7 @@ class InitsysInstallData(install):
continue
self.distribution.data_files.append(
(INITSYS_ROOTS[k], INITSYS_FILES[k]))
- # Force that command to reinitalize (with new file list)
+ # Force that command to reinitialize (with new file list)
self.distribution.reinitialize_command('install_data', True)
@@ -266,7 +271,9 @@ if not platform.system().endswith('BSD'):
(ETC + '/NetworkManager/dispatcher.d/',
['tools/hook-network-manager']),
(ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']),
- (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')])
+ (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]),
+ (ETC + '/systemd/system/sshd-keygen@.service.d/',
+ ['systemd/disable-sshd-keygen-if-cloud-init-active.conf']),
])
# Use a subclass for install that handles
# adding on the right init system configuration files
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 3dbe5947..7d1e7256 100644
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -83,8 +83,8 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel",
- "rocky", "virtuozzo"] %}
+{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
+ "openEuler", "rhel", "rocky", "virtuozzo"] %}
local dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
local dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index 636f59be..de3f3d91 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -12,8 +12,8 @@ After=systemd-networkd-wait-online.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
{% endif %}
-{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel",
- "rocky", "virtuozzo"] %}
+{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
+ "openEuler", "rhel", "rocky", "virtuozzo"] %}
After=network.service
After=NetworkManager.service
{% endif %}
diff --git a/systemd/disable-sshd-keygen-if-cloud-init-active.conf b/systemd/disable-sshd-keygen-if-cloud-init-active.conf
new file mode 100644
index 00000000..1a5d7a5a
--- /dev/null
+++ b/systemd/disable-sshd-keygen-if-cloud-init-active.conf
@@ -0,0 +1,7 @@
+# In some cloud-init enabled images the sshd-keygen template service may race
+# with cloud-init during boot causing issues with host key generation. This
+# drop-in config adds a condition to sshd-keygen@.service if it exists and
+# prevents the sshd-keygen units from running *if* cloud-init is going to run.
+#
+[Unit]
+ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target
diff --git a/templates/hosts.alpine.tmpl b/templates/hosts.alpine.tmpl
index 33c1a941..98ae55e6 100644
--- a/templates/hosts.alpine.tmpl
+++ b/templates/hosts.alpine.tmpl
@@ -13,16 +13,13 @@ you need to add the following to config:
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
# The following lines are desirable for IPv4 capable hosts
-127.0.1.1 {{fqdn}} {{hostname}}
-127.0.0.1 localhost.localdomain localhost
-127.0.0.1 localhost4.localdomain4 localhost4
+127.0.1.1 {{hostname}} {{fqdn}}
+127.0.0.1 localhost localhost.localdomain
+127.0.0.1 localhost4 localhost4.localdomain4
# The following lines are desirable for IPv6 capable hosts
-::1 {{fqdn}} {{hostname}}
-::1 localhost6.localdomain6 localhost6
+::1 {{hostname}} {{fqdn}}
+::1 localhost6 localhost6.localdomain6
-fe00::0 ip6-localnet
-ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
-ff02::3 ip6-allhosts
diff --git a/templates/hosts.debian.tmpl b/templates/hosts.debian.tmpl
index 7e29907a..afeccf9a 100644
--- a/templates/hosts.debian.tmpl
+++ b/templates/hosts.debian.tmpl
@@ -17,10 +17,7 @@ you need to add the following to config:
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
-::1 ip6-localhost ip6-loopback
-fe00::0 ip6-localnet
-ff00::0 ip6-mcastprefix
+::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
-ff02::3 ip6-allhosts
diff --git a/templates/sources.list.debian.tmpl b/templates/sources.list.debian.tmpl
index e7ef9ed1..e8a7c015 100644
--- a/templates/sources.list.debian.tmpl
+++ b/templates/sources.list.debian.tmpl
@@ -15,8 +15,8 @@ deb-src {{mirror}} {{codename}} main
## Major bug fix updates produced after the final release of the
## distribution.
-deb {{security}} {{codename}}/updates main
-deb-src {{security}} {{codename}}/updates main
+deb {{security}} {{codename}}{% if codename in ('buster', 'stretch') %}/updates{% else %}-security{% endif %} main
+deb-src {{security}} {{codename}}{% if codename in ('buster', 'stretch') %}/updates{% else %}-security{% endif %} main
deb {{mirror}} {{codename}}-updates main
deb-src {{mirror}} {{codename}}-updates main
diff --git a/test-requirements.txt b/test-requirements.txt
index 0a6a04d4..06dfbbec 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -5,3 +5,4 @@ pytest-cov
# Only really needed on older versions of python
setuptools
+jsonschema
diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py
deleted file mode 100644
index 6c632f99..00000000
--- a/tests/cloud_tests/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-import logging
-import os
-
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases')
-TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases')
-TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2])
-
-# This domain contains reverse lookups for hostnames that are used.
-# The primary reason is so sudo will return quickly when it attempts
-# to look up the hostname. i9n is just short for 'integration'.
-# see also bug 1730744 for why we had to do this.
-CI_DOMAIN = "i9n.cloud-init.io"
-
-
-def _initialize_logging():
- """Configure logging for cloud_tests."""
- logger = logging.getLogger(__name__)
- logger.setLevel(logging.DEBUG)
- formatter = logging.Formatter(
- '%(asctime)s - %(pathname)s:%(funcName)s:%(lineno)s '
- '[%(levelname)s]: %(message)s')
-
- console = logging.StreamHandler()
- console.setLevel(logging.DEBUG)
- console.setFormatter(formatter)
-
- logger.addHandler(console)
-
- return logger
-
-
-LOG = _initialize_logging()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/__main__.py b/tests/cloud_tests/__main__.py
deleted file mode 100644
index 7ee29cad..00000000
--- a/tests/cloud_tests/__main__.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main entry point."""
-
-import argparse
-import logging
-import os
-import sys
-
-from tests.cloud_tests import args, bddeb, collect, manage, run_funcs, verify
-from tests.cloud_tests import LOG
-
-
-def configure_log(args):
- """Configure logging."""
- level = logging.INFO
- if args.verbose:
- level = logging.DEBUG
- elif args.quiet:
- level = logging.WARN
- LOG.setLevel(level)
-
-
-def main():
- """Entry point for cloud test suite."""
- # configure parser
- parser = argparse.ArgumentParser(prog='cloud_tests')
- subparsers = parser.add_subparsers(dest="subcmd")
- subparsers.required = True
-
- def add_subparser(name, description, arg_sets):
- """Add arguments to subparser."""
- subparser = subparsers.add_parser(name, help=description)
- for (_args, _kwargs) in (a for arg_set in arg_sets for a in arg_set):
- subparser.add_argument(*_args, **_kwargs)
-
- # configure subparsers
- for (name, (description, arg_sets)) in args.SUBCMDS.items():
- add_subparser(name, description,
- [args.ARG_SETS[arg_set] for arg_set in arg_sets])
-
- # parse arguments
- parsed = parser.parse_args()
-
- # process arguments
- configure_log(parsed)
- (_, arg_sets) = args.SUBCMDS[parsed.subcmd]
- for normalizer in [args.NORMALIZERS[arg_set] for arg_set in arg_sets]:
- parsed = normalizer(parsed)
- if not parsed:
- return -1
-
- # run handler
- LOG.debug('running with args: %s', parsed)
- return {
- 'bddeb': bddeb.bddeb,
- 'collect': collect.collect,
- 'create': manage.create,
- 'run': run_funcs.run,
- 'tree_collect': run_funcs.tree_collect,
- 'tree_run': run_funcs.tree_run,
- 'verify': verify.verify,
- }[parsed.subcmd](parsed)
-
-
-if __name__ == "__main__":
- if os.geteuid() == 0:
- sys.exit('Do not run as root')
- sys.exit(main())
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
deleted file mode 100644
index ab345491..00000000
--- a/tests/cloud_tests/args.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Argparse argument setup and sanitization."""
-
-import os
-
-from tests.cloud_tests import config, util
-from tests.cloud_tests import LOG, TREE_BASE
-
-ARG_SETS = {
- 'BDDEB': (
- (('--bddeb-args',),
- {'help': 'args to pass through to bddeb',
- 'action': 'store', 'default': None, 'required': False}),
- (('--build-os',),
- {'help': 'OS to use as build system (default is xenial)',
- 'action': 'store', 'choices': config.ENABLED_DISTROS,
- 'default': 'xenial', 'required': False}),
- (('--build-platform',),
- {'help': 'platform to use for build system (default is lxd)',
- 'action': 'store', 'choices': config.ENABLED_PLATFORMS,
- 'default': 'lxd', 'required': False}),
- (('--cloud-init',),
- {'help': 'path to base of cloud-init tree', 'metavar': 'DIR',
- 'action': 'store', 'required': False, 'default': TREE_BASE}),),
- 'COLLECT': (
- (('-p', '--platform'),
- {'help': 'platform(s) to run tests on', 'metavar': 'PLATFORM',
- 'action': 'append', 'choices': config.ENABLED_PLATFORMS,
- 'default': []}),
- (('-n', '--os-name'),
- {'help': 'the name(s) of the OS(s) to test', 'metavar': 'NAME',
- 'action': 'append', 'choices': config.ENABLED_DISTROS,
- 'default': []}),
- (('-t', '--test-config'),
- {'help': 'test config file(s) to use', 'metavar': 'FILE',
- 'action': 'append', 'default': []}),
- (('--feature-override',),
- {'help': 'feature flags override(s), <flagname>=<true/false>',
- 'action': 'append', 'default': [], 'required': False}),),
- 'CREATE': (
- (('-c', '--config'),
- {'help': 'cloud-config yaml for testcase', 'metavar': 'DATA',
- 'action': 'store', 'required': False, 'default': None}),
- (('-e', '--enable'),
- {'help': 'enable testcase', 'required': False, 'default': False,
- 'action': 'store_true'}),
- (('name',),
- {'help': 'testcase name, in format "<category>/<test>"',
- 'action': 'store'}),
- (('-d', '--description'),
- {'help': 'description of testcase', 'required': False}),
- (('-f', '--force'),
- {'help': 'overwrite already existing test', 'required': False,
- 'action': 'store_true', 'default': False}),),
- 'INTERFACE': (
- (('-v', '--verbose'),
- {'help': 'verbose output', 'action': 'store_true', 'default': False}),
- (('-q', '--quiet'),
- {'help': 'quiet output', 'action': 'store_true', 'default': False}),),
- 'OUTPUT': (
- (('-d', '--data-dir'),
- {'help': 'directory to store test data in',
- 'action': 'store', 'metavar': 'DIR', 'required': False}),
- (('--preserve-instance',),
- {'help': 'do not destroy the instance under test',
- 'action': 'store_true', 'default': False, 'required': False}),
- (('--preserve-data',),
- {'help': 'do not remove collected data after successful run',
- 'action': 'store_true', 'default': False, 'required': False}),),
- 'OUTPUT_DEB': (
- (('--deb',),
- {'help': 'path to write output deb to', 'metavar': 'FILE',
- 'action': 'store', 'required': False,
- 'default': 'cloud-init_all.deb'}),),
- 'RESULT': (
- (('-r', '--result'),
- {'help': 'file to write results to',
- 'action': 'store', 'metavar': 'FILE'}),),
- 'SETUP': (
- (('--deb',),
- {'help': 'install deb', 'metavar': 'FILE', 'action': 'store'}),
- (('--rpm',),
- {'help': 'install rpm', 'metavar': 'FILE', 'action': 'store'}),
- (('--script',),
- {'help': 'script to set up image', 'metavar': 'DATA',
- 'action': 'store'}),
- (('--repo',),
- {'help': 'repo to enable (implies -u)', 'metavar': 'NAME',
- 'action': 'store'}),
- (('--ppa',),
- {'help': 'ppa to enable (implies -u)', 'metavar': 'NAME',
- 'action': 'store'}),
- (('-u', '--upgrade'),
- {'help': 'upgrade or install cloud-init from repo',
- 'action': 'store_true', 'default': False}),
- (('--upgrade-full',),
- {'help': 'do full system upgrade from repo (implies -u)',
- 'action': 'store_true', 'default': False}),),
-
-}
-
-SUBCMDS = {
- 'bddeb': ('build cloud-init deb from tree',
- ('BDDEB', 'OUTPUT_DEB', 'INTERFACE')),
- 'collect': ('collect test data',
- ('COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT', 'SETUP')),
- 'create': ('create new test case', ('CREATE', 'INTERFACE')),
- 'run': ('run test suite',
- ('COLLECT', 'INTERFACE', 'RESULT', 'OUTPUT', 'SETUP')),
- 'tree_collect': ('collect using current working tree',
- ('BDDEB', 'COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT')),
- 'tree_run': ('run using current working tree',
- ('BDDEB', 'COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT')),
- 'verify': ('verify test data', ('INTERFACE', 'OUTPUT', 'RESULT')),
-}
-
-
-def _empty_normalizer(args):
- """Do not normalize arguments."""
- return args
-
-
-def normalize_bddeb_args(args):
- """Normalize BDDEB arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors encountered
- """
- # make sure cloud-init dir is accessible
- if not (args.cloud_init and os.path.isdir(args.cloud_init)):
- LOG.error('invalid cloud-init tree path')
- return None
-
- return args
-
-
-def normalize_create_args(args):
- """Normalize CREATE arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- # ensure valid name for new test
- if len(args.name.split('/')) != 2:
- LOG.error('invalid test name: %s', args.name)
- return None
- if os.path.exists(config.name_to_path(args.name)):
- msg = 'test: {} already exists'.format(args.name)
- if args.force:
- LOG.warning('%s but ignoring due to --force', msg)
- else:
- LOG.error(msg)
- return None
-
- # ensure test config valid if specified
- if isinstance(args.config, str) and len(args.config) == 0:
- LOG.error('test config cannot be empty if specified')
- return None
-
- # ensure description valid if specified
- if (isinstance(args.description, str) and
- (len(args.description) > 70 or len(args.description) == 0)):
- LOG.error('test description must be between 1 and 70 characters')
- return None
-
- return args
-
-
-def normalize_collect_args(args):
- """Normalize COLLECT arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- # platform should default to lxd
- if len(args.platform) == 0:
- args.platform = ['lxd']
- args.platform = util.sorted_unique(args.platform)
-
- # os name should default to all enabled
- # if os name is provided ensure that all provided are supported
- if len(args.os_name) == 0:
- args.os_name = config.ENABLED_DISTROS
- else:
- supported = config.ENABLED_DISTROS
- invalid = [os_name for os_name in args.os_name
- if os_name not in supported]
- if len(invalid) != 0:
- LOG.error('invalid os name(s): %s', invalid)
- return None
- args.os_name = util.sorted_unique(args.os_name)
-
- # test configs should default to all enabled
- # if test configs are provided, ensure that all provided are valid
- if len(args.test_config) == 0:
- args.test_config = config.list_test_configs()
- else:
- valid = []
- invalid = []
- for name in args.test_config:
- if os.path.exists(name):
- valid.append(name)
- elif os.path.exists(config.name_to_path(name)):
- valid.append(config.name_to_path(name))
- else:
- invalid.append(name)
- if len(invalid) != 0:
- LOG.error('invalid test config(s): %s', invalid)
- return None
- else:
- args.test_config = valid
- args.test_config = util.sorted_unique(args.test_config)
-
- # parse feature flag overrides and ensure all are valid
- if args.feature_override:
- overrides = args.feature_override
- args.feature_override = util.parse_conf_list(
- overrides, boolean=True, valid=config.list_feature_flags())
- if not args.feature_override:
- LOG.error('invalid feature flag override(s): %s', overrides)
- return None
- else:
- args.feature_override = {}
-
- return args
-
-
-def normalize_output_args(args):
- """Normalize OUTPUT arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- if args.data_dir:
- args.data_dir = os.path.abspath(args.data_dir)
- if not os.path.exists(args.data_dir):
- os.mkdir(args.data_dir)
-
- if not args.data_dir:
- args.data_dir = None
-
- # ensure clean output dir if collect
- # ensure data exists if verify
- if args.subcmd == 'collect':
- if not util.is_clean_writable_dir(args.data_dir):
- LOG.error('data_dir must be empty/new and must be writable')
- return None
-
- return args
-
-
-def normalize_output_deb_args(args):
- """Normalize OUTPUT_DEB arguments.
-
- @param args: parsed args
-    @return_value: updated args, or None if errors occurred
- """
- # make sure to use abspath for deb
- args.deb = os.path.abspath(args.deb)
-
- if not args.deb.endswith('.deb'):
- LOG.error('output filename does not end in ".deb"')
- return None
-
- return args
-
-
-def normalize_setup_args(args):
- """Normalize SETUP arguments.
-
- @param args: parsed args
- @return_value: updated_args, or None if errors occurred
- """
- # ensure deb or rpm valid if specified
- for pkg in (args.deb, args.rpm):
- if pkg is not None and not os.path.exists(pkg):
- LOG.error('cannot find package: %s', pkg)
- return None
-
- # if repo or ppa to be enabled run upgrade
- if args.repo or args.ppa:
- args.upgrade = True
-
- # if ppa is specified, remove leading 'ppa:' if any
- _ppa_header = 'ppa:'
- if args.ppa and args.ppa.startswith(_ppa_header):
- args.ppa = args.ppa[len(_ppa_header):]
-
- return args
-
-
-NORMALIZERS = {
- 'BDDEB': normalize_bddeb_args,
- 'COLLECT': normalize_collect_args,
- 'CREATE': normalize_create_args,
- 'INTERFACE': _empty_normalizer,
- 'OUTPUT': normalize_output_args,
- 'OUTPUT_DEB': normalize_output_deb_args,
- 'RESULT': _empty_normalizer,
- 'SETUP': normalize_setup_args,
-}
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py
deleted file mode 100644
index e45ad947..00000000
--- a/tests/cloud_tests/bddeb.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to build a deb."""
-
-from functools import partial
-import os
-import tempfile
-
-from cloudinit import subp
-from tests.cloud_tests import (config, LOG)
-from tests.cloud_tests import platforms
-from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
-
-pre_reqs = ['devscripts', 'equivs', 'git', 'tar']
-
-
-def _out(cmd_res):
- """Get clean output from cmd result."""
- return cmd_res[0].decode("utf-8").strip()
-
-
-def build_deb(args, instance):
- """Build deb on system and copy out to location at args.deb.
-
- @param args: cmdline arguments
- @return_value: tuple of results and fail count
- """
- # update remote system package list and install build deps
- LOG.debug('installing pre-reqs')
- pkgs = ' '.join(pre_reqs)
- instance.execute('apt-get update && apt-get install --yes {}'.format(pkgs))
-
- # local tmpfile that must be deleted
- local_tarball = tempfile.NamedTemporaryFile().name
-
- # paths to use in remote system
- output_link = '/root/cloud-init_all.deb'
- remote_tarball = _out(instance.execute(['mktemp']))
- extract_dir = '/root'
- bddeb_path = os.path.join(extract_dir, 'packages', 'bddeb')
- git_env = {'GIT_DIR': os.path.join(extract_dir, '.git'),
- 'GIT_WORK_TREE': extract_dir}
-
- LOG.debug('creating tarball of cloud-init at: %s', local_tarball)
- subp.subp(['tar', 'cf', local_tarball, '--owner', 'root',
- '--group', 'root', '-C', args.cloud_init, '.'])
- LOG.debug('copying to remote system at: %s', remote_tarball)
- instance.push_file(local_tarball, remote_tarball)
-
- LOG.debug('extracting tarball in remote system at: %s', extract_dir)
- instance.execute(['tar', 'xf', remote_tarball, '-C', extract_dir])
- instance.execute(['git', 'commit', '-a', '-m', 'tmp', '--allow-empty'],
- env=git_env)
-
- LOG.debug('installing deps')
- deps_path = os.path.join(extract_dir, 'tools', 'read-dependencies')
- instance.execute([deps_path, '--install', '--test-distro',
- '--distro', 'ubuntu'])
-
- LOG.debug('building deb in remote system at: %s', output_link)
- bddeb_args = args.bddeb_args.split() if args.bddeb_args else []
- instance.execute([bddeb_path, '-d'] + bddeb_args, env=git_env)
-
- # copy the deb back to the host system
- LOG.debug('copying built deb to host at: %s', args.deb)
- instance.pull_file(output_link, args.deb)
-
-
-def setup_build(args):
- """Set build system up then run build.
-
- @param args: cmdline arguments
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- # set up platform
- LOG.info('setting up platform: %s', args.build_platform)
- platform_config = config.load_platform_config(args.build_platform)
- platform_call = partial(platforms.get_platform, args.build_platform,
- platform_config)
- with PlatformComponent(platform_call) as platform:
-
- # set up image
- LOG.info('acquiring image for os: %s', args.build_os)
- img_conf = config.load_os_config(platform.platform_name, args.build_os)
- image_call = partial(platforms.get_image, platform, img_conf)
- with PlatformComponent(image_call) as image:
-
- # set up snapshot
- snapshot_call = partial(platforms.get_snapshot, image)
- with PlatformComponent(snapshot_call) as snapshot:
-
- # create instance with cloud-config to set it up
- LOG.info('creating instance to build deb in')
- empty_cloud_config = "#cloud-config\n{}"
- instance_call = partial(
- platforms.get_instance, snapshot, empty_cloud_config,
- use_desc='build cloud-init deb')
- with PlatformComponent(instance_call) as instance:
-
- # build the deb
- res = run_single('build deb on system',
- partial(build_deb, args, instance))
-
- return res
-
-
-def bddeb(args):
- """Entry point for build deb.
-
- @param args: cmdline arguments
- @return_value: fail count
- """
- LOG.info('preparing to build cloud-init deb')
- _res, failed = run_stage('build deb', [partial(setup_build, args)])
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
deleted file mode 100644
index 642745d8..00000000
--- a/tests/cloud_tests/collect.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to collect data from platforms during tests."""
-
-from functools import partial
-import os
-
-from cloudinit import util as c_util
-from tests.cloud_tests import (config, LOG, setup_image, util)
-from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
-from tests.cloud_tests import platforms
-from tests.cloud_tests.testcases import base, get_test_class
-
-
-def collect_script(instance, base_dir, script, script_name):
- """Collect script data.
-
- @param instance: instance to run script on
- @param base_dir: base directory for output data
- @param script: script contents
- @param script_name: name of script to run
- @return_value: None, may raise errors
- """
- LOG.debug('running collect script: %s', script_name)
- (out, err, exit) = instance.run_script(
- script.encode(), rcs=False,
- description='collect: {}'.format(script_name))
- if err:
- LOG.debug("collect script %s exited '%s' and had stderr: %s",
- script_name, err, exit)
- if not isinstance(out, bytes):
- raise util.PlatformError(
- "Collection of '%s' returned type %s, expected bytes: %s" %
- (script_name, type(out), out))
-
- c_util.write_file(os.path.join(base_dir, script_name), out)
-
-
-def collect_console(instance, base_dir):
- """Collect instance console log.
-
- @param instance: instance to get console log for
- @param base_dir: directory to write console log to
- """
- logfile = os.path.join(base_dir, 'console.log')
- LOG.debug('getting console log for %s to %s', instance.name, logfile)
- try:
- data = instance.console_log()
- except NotImplementedError as e:
-        # args[0] is hacky, but that's all I see to get at the message.
- data = b'NotImplementedError:' + e.args[0].encode()
- with open(logfile, "wb") as fp:
- fp.write(data)
-
-
-def collect_test_data(args, snapshot, os_name, test_name):
- """Collect data for test case.
-
- @param args: cmdline arguments
- @param snapshot: instantiated snapshot
- @param test_name: name or path of test to run
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- # load test config
- test_name_in = test_name
- test_name = config.path_to_name(test_name)
- test_config = config.load_test_config(test_name)
- user_data = test_config['cloud_config']
- test_scripts = test_config['collect_scripts']
- test_output_dir = os.sep.join(
- (args.data_dir, snapshot.platform_name, os_name, test_name))
-
- # if test is not enabled, skip and return 0 failures
- if not test_config.get('enabled', False):
- LOG.warning('test config %s is not enabled, skipping', test_name)
- return ({}, 0)
-
- test_class = get_test_class(
- config.name_to_module(test_name_in),
- test_data={'platform': snapshot.platform_name, 'os_name': os_name},
- test_conf=test_config['cloud_config'])
- try:
- test_class.maybeSkipTest()
- except base.SkipTest as s:
- LOG.warning('skipping test config %s: %s', test_name, s)
- return ({}, 0)
-
- # if testcase requires a feature flag that the image does not support,
- # skip the testcase with a warning
- req_features = test_config.get('required_features', [])
- if any(feature not in snapshot.features for feature in req_features):
- LOG.warning('test config %s requires features not supported by image, '
- 'skipping.\nrequired features: %s\nsupported features: %s',
- test_name, req_features, snapshot.features)
- return ({}, 0)
-
- # if there are user data overrides required for this test case, apply them
- overrides = snapshot.config.get('user_data_overrides', {})
- if overrides:
- LOG.debug('updating user data for collect with: %s', overrides)
- user_data = util.update_user_data(user_data, overrides)
-
- # create test instance
- component = PlatformComponent(
- partial(platforms.get_instance, snapshot, user_data,
- block=True, start=False, use_desc=test_name),
- preserve_instance=args.preserve_instance)
-
- LOG.info('collecting test data for test: %s', test_name)
- with component as instance:
- start_call = partial(run_single, 'boot instance', partial(
- instance.start, wait=True, wait_for_cloud_init=True))
- collect_calls = [partial(run_single, 'script {}'.format(script_name),
- partial(collect_script, instance,
- test_output_dir, script, script_name))
- for script_name, script in test_scripts.items()]
-
- res = run_stage('collect for test: {}'.format(test_name),
- [start_call] + collect_calls)
-
- instance.shutdown()
- collect_console(instance, test_output_dir)
-
- return res
-
-
-def collect_snapshot(args, image, os_name):
- """Collect data for snapshot of image.
-
- @param args: cmdline arguments
- @param image: instantiated image with set up complete
- @return_value tuple of results and fail count
- """
- res = ({}, 1)
-
- component = PlatformComponent(partial(platforms.get_snapshot, image))
-
- LOG.debug('creating snapshot for %s', os_name)
- with component as snapshot:
- LOG.info('collecting test data for os: %s', os_name)
- res = run_stage(
- 'collect test data for {}'.format(os_name),
- [partial(collect_test_data, args, snapshot, os_name, test_name)
- for test_name in args.test_config])
-
- return res
-
-
-def collect_image(args, platform, os_name):
- """Collect data for image.
-
- @param args: cmdline arguments
- @param platform: instantiated platform
- @param os_name: name of distro to collect for
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- os_config = config.load_os_config(
- platform.platform_name, os_name, require_enabled=True,
- feature_overrides=args.feature_override)
- LOG.debug('os config: %s', os_config)
- component = PlatformComponent(
- partial(platforms.get_image, platform, os_config))
-
- LOG.info('acquiring image for os: %s', os_name)
- with component as image:
- res = run_stage('set up and collect data for os: {}'.format(os_name),
- [partial(setup_image.setup_image, args, image)] +
- [partial(collect_snapshot, args, image, os_name)],
- continue_after_error=False)
-
- return res
-
-
-def collect_platform(args, platform_name):
- """Collect data for platform.
-
- @param args: cmdline arguments
- @param platform_name: platform to collect for
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- platform_config = config.load_platform_config(
- platform_name, require_enabled=True)
- platform_config['data_dir'] = args.data_dir
- LOG.debug('platform config: %s', platform_config)
- component = PlatformComponent(
- partial(platforms.get_platform, platform_name, platform_config))
-
- LOG.info('setting up platform: %s', platform_name)
- with component as platform:
- res = run_stage('collect for platform: {}'.format(platform_name),
- [partial(collect_image, args, platform, os_name)
- for os_name in args.os_name])
-
- return res
-
-
-def collect(args):
- """Entry point for collection.
-
- @param args: cmdline arguments
- @return_value: fail count
- """
- (res, failed) = run_stage(
- 'collect data', [partial(collect_platform, args, platform_name)
- for platform_name in args.platform])
-
- LOG.debug('collect stages: %s', res)
- if args.result:
- util.merge_results({'collect_stages': res}, args.result)
-
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py
deleted file mode 100644
index 06536edc..00000000
--- a/tests/cloud_tests/config.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to setup test configuration."""
-
-import glob
-import os
-
-from cloudinit import util as c_util
-from tests.cloud_tests import (BASE_DIR, TEST_CONF_DIR)
-
-# conf files
-CONF_EXT = '.yaml'
-VERIFY_EXT = '.py'
-PLATFORM_CONF = os.path.join(BASE_DIR, 'platforms.yaml')
-RELEASES_CONF = os.path.join(BASE_DIR, 'releases.yaml')
-TESTCASE_CONF = os.path.join(BASE_DIR, 'testcases.yaml')
-
-
-def get(base, key):
- """Get config entry 'key' from base, ensuring is dictionary."""
- return base[key] if key in base and base[key] is not None else {}
-
-
-def enabled(config):
- """Test if config item is enabled."""
- return isinstance(config, dict) and config.get('enabled', False)
-
-
-def path_to_name(path):
- """Convert abs or rel path to test config to path under 'sconfigs/'."""
- dir_path, file_name = os.path.split(os.path.normpath(path))
- name = os.path.splitext(file_name)[0]
- return os.sep.join((os.path.basename(dir_path), name))
-
-
-def name_to_path(name):
- """Convert test config path under configs/ to full config path."""
- name = os.path.normpath(name)
- if not name.endswith(CONF_EXT):
- name = name + CONF_EXT
- return name if os.path.isabs(name) else os.path.join(TEST_CONF_DIR, name)
-
-
-def name_sanitize(name):
- """Sanitize test name to be used as a module name."""
- return name.replace('-', '_')
-
-
-def name_to_module(name):
- """Convert test name to a loadable module name under 'testcases/'."""
- name = name_sanitize(path_to_name(name))
- return name.replace(os.path.sep, '.')
-
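-# Illustration (hypothetical test name): name_to_module('examples/setup-run-once.yaml')
-#   path_to_name()  -> 'examples/setup-run-once'
-#   name_sanitize() -> 'examples/setup_run_once'
-#   module name     -> 'examples.setup_run_once'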
-
-def merge_config(base, override):
- """Merge config and base."""
- res = base.copy()
- res.update(override)
- res.update({k: merge_config(base.get(k, {}), v)
- for k, v in override.items() if isinstance(v, dict)})
- return res
-
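-# Behaviour sketch (illustrative values): nested dicts merge recursively,
-# while scalar keys in 'override' replace those in 'base':
-#   merge_config({'a': 1, 'b': {'c': 2}}, {'a': 9, 'b': {'d': 3}})
-#   == {'a': 9, 'b': {'c': 2, 'd': 3}}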
-
-def merge_feature_groups(feature_conf, feature_groups, overrides):
- """Combine feature groups and overrides to construct a supported list.
-
- @param feature_conf: feature config from releases.yaml
- @param feature_groups: feature groups the release is a member of
- @param overrides: overrides specified by the release's config
- @return_value: dict of {feature: true/false} settings
- """
- res = dict.fromkeys(feature_conf['all'])
- for group in feature_groups:
- res.update(feature_conf['groups'][group])
- res.update(overrides)
- return res
-
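-# Precedence sketch (illustrative values): explicit overrides beat group
-# settings, which beat the default of None for every flag in 'all':
-#   merge_feature_groups(
-#       {'all': ['apt', 'snap'], 'groups': {'base': {'apt': True}}},
-#       ['base'], {'snap': False})
-#   == {'apt': True, 'snap': False}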
-
-def load_platform_config(platform_name, require_enabled=False):
- """Load configuration for platform.
-
- @param platform_name: name of platform to retrieve config for
- @param require_enabled: if true, raise error if 'enabled' not True
- @return_value: config dict
- """
- main_conf = c_util.read_conf(PLATFORM_CONF)
- conf = merge_config(main_conf['default_platform_config'],
- main_conf['platforms'][platform_name])
- if require_enabled and not enabled(conf):
- raise ValueError('Platform is not enabled')
- return conf
-
-
-def load_os_config(platform_name, os_name, require_enabled=False,
- feature_overrides=None):
- """Load configuration for os.
-
- @param platform_name: platform name to load os config for
- @param os_name: name of os to retrieve config for
- @param require_enabled: if true, raise error if 'enabled' not True
- @param feature_overrides: feature flag overrides to merge with features
- @return_value: config dict
- """
- if feature_overrides is None:
- feature_overrides = {}
- main_conf = c_util.read_conf(RELEASES_CONF)
- default = main_conf['default_release_config']
- image = main_conf['releases'][os_name]
- conf = merge_config(merge_config(get(default, 'default'),
- get(default, platform_name)),
- merge_config(get(image, 'default'),
- get(image, platform_name)))
-
- feature_conf = main_conf['features']
- feature_groups = conf.get('feature_groups', [])
- overrides = merge_config(get(conf, 'features'), feature_overrides)
- conf['arch'] = c_util.get_dpkg_architecture()
- conf['features'] = merge_feature_groups(
- feature_conf, feature_groups, overrides)
-
- if require_enabled and not enabled(conf):
- raise ValueError('OS is not enabled')
- return conf
-
-
-def load_test_config(path):
- """Load a test config file by either abs path or rel path."""
- return merge_config(c_util.read_conf(TESTCASE_CONF)['base_test_data'],
- c_util.read_conf(name_to_path(path)))
-
-
-def list_feature_flags():
- """List all supported feature flags."""
- feature_conf = get(c_util.read_conf(RELEASES_CONF), 'features')
- return feature_conf.get('all', [])
-
-
-def list_enabled_platforms():
- """List all platforms enabled for testing."""
- platforms = get(c_util.read_conf(PLATFORM_CONF), 'platforms')
- return [k for k, v in platforms.items() if enabled(v)]
-
-
-def list_enabled_distros(platforms):
- """List all distros enabled for testing on specified platforms."""
- def platform_has_enabled(config):
- """List if platform is enabled."""
- return any(enabled(merge_config(get(config, 'default'),
- get(config, platform)))
- for platform in platforms)
-
- releases = get(c_util.read_conf(RELEASES_CONF), 'releases')
- return [k for k, v in releases.items() if platform_has_enabled(v)]
-
-
-def list_test_configs():
- """List all available test config files by abspath."""
- return [os.path.abspath(f) for f in
- glob.glob(os.sep.join((TEST_CONF_DIR, '*', '*.yaml')))]
-
-
-ENABLED_PLATFORMS = sorted(list_enabled_platforms())
-ENABLED_DISTROS = sorted(list_enabled_distros(ENABLED_PLATFORMS))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/manage.py b/tests/cloud_tests/manage.py
deleted file mode 100644
index 5f0cfd23..00000000
--- a/tests/cloud_tests/manage.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Create test cases automatically given a user_data script."""
-
-import os
-import textwrap
-
-from cloudinit import util as c_util
-from tests.cloud_tests.config import VERIFY_EXT
-from tests.cloud_tests import (config, util)
-from tests.cloud_tests import TESTCASES_DIR
-
-
-_verifier_fmt = textwrap.dedent(
- """
- \"\"\"cloud-init Integration Test Verify Script\"\"\"
- from tests.cloud_tests.testcases import base
-
-
- class {test_class}(base.CloudTestCase):
- \"\"\"
- Name: {test_name}
- Category: {test_category}
- Description: {test_description}
- \"\"\"
- pass
- """
-).lstrip()
-_config_fmt = textwrap.dedent(
- """
- #
- # Name: {test_name}
- # Category: {test_category}
- # Description: {test_description}
- #
- {config}
- """
-).strip()
-
-
-def write_testcase_config(args, fmt_args, testcase_file):
- """Write the testcase config file."""
- testcase_config = {'enabled': args.enable, 'collect_scripts': {}}
- if args.config:
- testcase_config['cloud_config'] = args.config
- fmt_args['config'] = util.yaml_format(testcase_config)
- c_util.write_file(testcase_file, _config_fmt.format(**fmt_args), omode='w')
-
-
-def write_verifier(args, fmt_args, verifier_file):
- """Write the verifier script."""
- fmt_args['test_class'] = 'Test{}'.format(
- config.name_sanitize(fmt_args['test_name']).title())
- c_util.write_file(verifier_file,
- _verifier_fmt.format(**fmt_args), omode='w')
-
-
-def create(args):
- """Create a new testcase."""
- (test_category, test_name) = args.name.split('/')
- fmt_args = {'test_name': test_name, 'test_category': test_category,
- 'test_description': str(args.description)}
-
- testcase_file = config.name_to_path(args.name)
- verifier_file = os.path.join(
- TESTCASES_DIR, test_category,
- config.name_sanitize(test_name) + VERIFY_EXT)
-
- write_testcase_config(args, fmt_args, testcase_file)
- write_verifier(args, fmt_args, verifier_file)
-
- return 0
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml
deleted file mode 100644
index eaaa0a71..00000000
--- a/tests/cloud_tests/platforms.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-# ============================= Platform Config ===============================
-default_platform_config:
- # all disabled by default
- enabled: false
- # maximum time to retrieve image
- get_image_timeout: 300
- # maximum time to create instance (before waiting for cloud-init)
- create_instance_timeout: 60
- private_key: cloud_init_rsa
- public_key: cloud_init_rsa.pub
-platforms:
- ec2:
- enabled: true
- instance-type: t2.micro
- tag: cii
- lxd:
- enabled: true
- # overrides for image templates
- template_overrides:
- /var/lib/cloud/seed/nocloud-net/meta-data:
- when:
- - create
- - copy
- template: cloud-init-meta.tpl
- /var/lib/cloud/seed/nocloud-net/network-config:
- when:
- - create
- - copy
- template: cloud-init-network.tpl
- /var/lib/cloud/seed/nocloud-net/user-data:
- when:
- - create
- - copy
- template: cloud-init-user.tpl
- properties:
- default: |
- #cloud-config
- {}
- /var/lib/cloud/seed/nocloud-net/vendor-data:
- when:
- - create
- - copy
- template: cloud-init-vendor.tpl
- properties:
- default: |
- #cloud-config
- {}
- # overrides image template files
- template_files:
- cloud-init-meta.tpl: |
- #cloud-config
- instance-id: {{ container.name }}
- local-hostname: {{ container.name }}
- {{ config_get("user.meta-data", "") }}
- cloud-init-network.tpl: |
- {% if config_get("user.network-config", "") == "" %}version: 1
- config:
- - type: physical
- name: eth0
- subnets:
- - type: {% if config_get("user.network_mode", "") == "link-local" %}manual{% else %}dhcp{% endif %}
- control: auto{% else %}{{ config_get("user.network-config", "") }}{% endif %}
- cloud-init-user.tpl: |
- {{ config_get("user.user-data", properties.default) }}
- cloud-init-vendor.tpl: |
- {{ config_get("user.vendor-data", properties.default) }}
- nocloud-kvm:
- enabled: true
- cache_mode: cache=none,aio=native
- azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py
deleted file mode 100644
index e506baa0..00000000
--- a/tests/cloud_tests/platforms/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-from .ec2 import platform as ec2
-from .lxd import platform as lxd
-from .nocloudkvm import platform as nocloudkvm
-from .azurecloud import platform as azurecloud
-from ..util import emit_dots_on_travis
-
-PLATFORMS = {
- 'ec2': ec2.EC2Platform,
- 'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform,
- 'lxd': lxd.LXDPlatform,
- 'azurecloud': azurecloud.AzureCloudPlatform,
-}
-
-
-def get_image(platform, config):
- """Get image from platform object using os_name."""
- with emit_dots_on_travis():
- return platform.get_image(config)
-
-
-def get_instance(snapshot, *args, **kwargs):
- """Get instance from snapshot."""
- return snapshot.launch(*args, **kwargs)
-
-
-def get_platform(platform_name, config):
- """Get the platform object for 'platform_name' and init."""
- platform_cls = PLATFORMS.get(platform_name)
- if not platform_cls:
- raise ValueError('invalid platform name: {}'.format(platform_name))
- return platform_cls(config)
-
-
-def get_snapshot(image):
- """Get snapshot from image."""
- return image.snapshot()
-
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/azurecloud/__init__.py b/tests/cloud_tests/platforms/azurecloud/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/cloud_tests/platforms/azurecloud/__init__.py
+++ /dev/null
diff --git a/tests/cloud_tests/platforms/azurecloud/image.py b/tests/cloud_tests/platforms/azurecloud/image.py
deleted file mode 100644
index aad2bca1..00000000
--- a/tests/cloud_tests/platforms/azurecloud/image.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Azure Cloud image Base class."""
-
-from tests.cloud_tests import LOG
-
-from ..images import Image
-from .snapshot import AzureCloudSnapshot
-
-
-class AzureCloudImage(Image):
- """Azure Cloud backed image."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, config, image_id):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- @param image_id: image id used to boot instance
- """
- super(AzureCloudImage, self).__init__(platform, config)
- self._img_instance = None
- self.image_id = image_id
-
- @property
- def _instance(self):
- """Internal use only, returns a running instance"""
- if not self._img_instance:
- self._img_instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_id, user_data=None)
- self._img_instance.start(wait=True, wait_for_cloud_init=True)
- return self._img_instance
-
- def destroy(self):
- """Delete the instance used to create a custom image."""
- if self._img_instance:
- LOG.debug('Deleting backing instance %s',
- self._img_instance.vm_name)
- delete_vm = self.platform.compute_client.virtual_machines.delete(
- self.platform.resource_group.name, self._img_instance.vm_name)
- delete_vm.wait()
-
- super(AzureCloudImage, self).destroy()
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- LOG.debug('executing commands on image')
- self._instance.start(wait=True)
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- LOG.debug('pushing file to image')
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- LOG.debug('running script on image')
- self._instance.start()
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """ Create snapshot (image) of instance, wait until done.
-
- If no instance has been booted, base image is returned.
- Otherwise runs the clean script, deallocates, generalizes
- and creates custom image from instance.
- """
- LOG.debug('creating snapshot of image')
- if not self._img_instance:
- LOG.debug('No existing image, snapshotting base image')
- return AzureCloudSnapshot(self.platform, self.properties,
- self.config, self.features,
- self._instance.vm_name,
- delete_on_destroy=False)
-
- LOG.debug('creating snapshot from instance: %s', self._img_instance)
- if self.config.get('boot_clean_script'):
- self._img_instance.run_script(self.config.get('boot_clean_script'))
-
- LOG.debug('deallocating instance %s', self._instance.vm_name)
- deallocate = self.platform.compute_client.virtual_machines.deallocate(
- self.platform.resource_group.name, self._instance.vm_name)
- deallocate.wait()
-
- LOG.debug('generalizing instance %s', self._instance.vm_name)
- self.platform.compute_client.virtual_machines.generalize(
- self.platform.resource_group.name, self._instance.vm_name)
-
- image_params = {
- "location": self.platform.location,
- "properties": {
- "sourceVirtualMachine": {
- "id": self._img_instance.instance.id
- }
- }
- }
- LOG.debug('updating resource group image %s', self._instance.vm_name)
- self.platform.compute_client.images.create_or_update(
- self.platform.resource_group.name, self._instance.vm_name,
- image_params)
-
- LOG.debug('destroying self')
- self.destroy()
-
- LOG.debug('snapshot complete')
- return AzureCloudSnapshot(self.platform, self.properties, self.config,
- self.features, self._instance.vm_name)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/azurecloud/instance.py b/tests/cloud_tests/platforms/azurecloud/instance.py
deleted file mode 100644
index eedbaae8..00000000
--- a/tests/cloud_tests/platforms/azurecloud/instance.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud instance."""
-
-from datetime import datetime, timedelta
-from urllib.parse import urlparse
-from time import sleep
-import traceback
-import os
-
-
-# pylint: disable=no-name-in-module
-from azure.storage.blob import BlockBlobService, BlobPermissions
-from msrestazure.azure_exceptions import CloudError
-
-from tests.cloud_tests import LOG
-
-from ..instances import Instance
-
-
-class AzureCloudInstance(Instance):
- """Azure Cloud backed instance."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, properties, config,
- features, image_id, user_data=None):
- """Set up instance.
-
- @param platform: platform object
- @param properties: dictionary of properties
- @param config: dictionary of configuration values
- @param features: dictionary of supported feature flags
- @param image_id: image to find and/or use
- @param user_data: test user-data to pass to instance
- """
- super(AzureCloudInstance, self).__init__(
- platform, image_id, properties, config, features)
-
- self.ssh_port = 22
- self.ssh_ip = None
- self.instance = None
- self.image_id = image_id
- self.vm_name = 'ci-azure-i-%s' % self.platform.tag
- self.user_data = user_data
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
- self.blob_client, self.container, self.blob = None, None, None
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance with the platforms NIC."""
- if self.instance:
- return
- data = self.image_id.split('-')
- release, support = data[2].replace('_', '.'), data[3]
- sku = '%s-%s' % (release, support) if support == 'LTS' else release
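- # Illustration only: the image_id layout assumed here is, e.g.,
- # 'Canonical-UbuntuServer-18_04-LTS-<serial>', so data[2] yields the
- # release ('18.04' after the replace) and data[3] the support level,
- # giving sku '18.04-LTS'.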
- image_resource_id = '/subscriptions/%s' \
- '/resourceGroups/%s' \
- '/providers/Microsoft.Compute/images/%s' % (
- self.platform.subscription_id,
- self.platform.resource_group.name,
- self.image_id)
- storage_uri = "http://%s.blob.core.windows.net" \
- % self.platform.storage.name
- with open(self.ssh_pubkey_file, 'r') as key:
- ssh_pub_keydata = key.read()
-
- image_exists = False
- try:
- LOG.debug('finding image in resource group using image_id')
- self.platform.compute_client.images.get(
- self.platform.resource_group.name,
- self.image_id
- )
- image_exists = True
- LOG.debug('image found, launching instance, image_id=%s',
- self.image_id)
- except CloudError:
- LOG.debug(('image not found, launching instance with base image, '
- 'image_id=%s'), self.image_id)
-
- vm_params = {
- 'name': self.vm_name,
- 'location': self.platform.location,
- 'os_profile': {
- 'computer_name': 'CI-%s' % self.platform.tag,
- 'admin_username': self.ssh_username,
- "customData": self.user_data,
- "linuxConfiguration": {
- "disable_password_authentication": True,
- "ssh": {
- "public_keys": [{
- "path": "/home/%s/.ssh/authorized_keys" %
- self.ssh_username,
- "keyData": ssh_pub_keydata
- }]
- }
- }
- },
- "diagnosticsProfile": {
- "bootDiagnostics": {
- "storageUri": storage_uri,
- "enabled": True
- }
- },
- 'hardware_profile': {
- 'vm_size': self.platform.vm_size
- },
- 'storage_profile': {
- 'image_reference': {
- 'id': image_resource_id
- } if image_exists else {
- 'publisher': 'Canonical',
- 'offer': 'UbuntuServer',
- 'sku': sku,
- 'version': 'latest'
- }
- },
- 'network_profile': {
- 'network_interfaces': [{
- 'id': self.platform.nic.id
- }]
- },
- 'tags': {
- 'Name': self.platform.tag,
- }
- }
-
- try:
- self.instance = self.platform.compute_client.virtual_machines.\
- create_or_update(self.platform.resource_group.name,
- self.vm_name, vm_params)
- LOG.debug('creating instance %s from image_id=%s', self.vm_name,
- self.image_id)
- except CloudError as e:
- raise RuntimeError(
- 'failed creating instance:\n{}'.format(traceback.format_exc())
- ) from e
-
- if wait:
- self.instance.wait()
- self.ssh_ip = self.platform.network_client.\
- public_ip_addresses.get(
- self.platform.resource_group.name,
- self.platform.public_ip.name
- ).ip_address
- self._wait_for_system(wait_for_cloud_init)
-
- self.instance = self.instance.result()
- self.blob_client, self.container, self.blob =\
- self._get_blob_client()
-
- def shutdown(self, wait=True):
- """Finds console log then stopping/deallocates VM"""
- LOG.debug('waiting on console log before stopping')
- attempts, exists = 5, False
- while not exists and attempts:
- try:
- attempts -= 1
- exists = self.blob_client.get_blob_to_bytes(
- self.container, self.blob)
- LOG.debug('found console log')
- except Exception as e:
- if attempts:
- LOG.debug('Unable to find console log, '
- '%s attempts remaining', attempts)
- sleep(15)
- else:
- LOG.warning('Could not find console log: %s', e)
-
- LOG.debug('stopping instance %s', self.image_id)
- vm_deallocate = \
- self.platform.compute_client.virtual_machines.deallocate(
- self.platform.resource_group.name, self.image_id)
- if wait:
- vm_deallocate.wait()
-
- def destroy(self):
- """Delete VM and close all connections"""
- if self.instance:
- LOG.debug('destroying instance: %s', self.image_id)
- vm_delete = self.platform.compute_client.virtual_machines.delete(
- self.platform.resource_group.name, self.image_id)
- vm_delete.wait()
-
- self._ssh_close()
-
- super(AzureCloudInstance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command on instance."""
- env_args = []
- if env:
- env_args = ['env'] + ["%s=%s" for k, v in env.items()]
-
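- # e.g. env={'LANG': 'C'} expands env_args to ['env', 'LANG=C'], so the
- # instance runs "sudo env LANG=C <command>" (illustrative values).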
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def _get_blob_client(self):
- """
- Use VM details to retrieve the container and blob name,
- then create a blob service client with a SAS token to
- retrieve the console log.
-
- :return: blob service, container name, blob name
- """
- LOG.debug('creating blob service for console log')
- storage = self.platform.storage_client.storage_accounts.get_properties(
- self.platform.resource_group.name, self.platform.storage.name)
-
- keys = self.platform.storage_client.storage_accounts.list_keys(
- self.platform.resource_group.name, self.platform.storage.name
- ).keys[0].value
-
- virtual_machine = self.platform.compute_client.virtual_machines.get(
- self.platform.resource_group.name, self.instance.name,
- expand='instanceView')
-
- blob_uri = virtual_machine.instance_view.boot_diagnostics.\
- serial_console_log_blob_uri
-
- container, blob = urlparse(blob_uri).path.split('/')[-2:]
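- # e.g. (illustrative) a blob_uri of
- # 'https://acct.blob.core.windows.net/logs/vm.serial.log' yields
- # container='logs' and blob='vm.serial.log'.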
-
- blob_client = BlockBlobService(
- account_name=storage.name,
- account_key=keys)
-
- sas = blob_client.generate_blob_shared_access_signature(
- container_name=container, blob_name=blob, protocol='https',
- expiry=datetime.utcnow() + timedelta(hours=1),
- permission=BlobPermissions.READ)
-
- blob_client = BlockBlobService(
- account_name=storage.name,
- sas_token=sas)
-
- return blob_client, container, blob
-
- def console_log(self):
- """Instance console.
-
- @return_value: bytes of this instance’s console
- """
- boot_diagnostics = self.blob_client.get_blob_to_bytes(
- self.container, self.blob)
- return boot_diagnostics.content
diff --git a/tests/cloud_tests/platforms/azurecloud/platform.py b/tests/cloud_tests/platforms/azurecloud/platform.py
deleted file mode 100644
index a664f612..00000000
--- a/tests/cloud_tests/platforms/azurecloud/platform.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud class."""
-
-import os
-import base64
-import traceback
-from datetime import datetime
-from tests.cloud_tests import LOG
-
-# pylint: disable=no-name-in-module
-from azure.common.credentials import ServicePrincipalCredentials
-# pylint: disable=no-name-in-module
-from azure.mgmt.resource import ResourceManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.network import NetworkManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.compute import ComputeManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.storage import StorageManagementClient
-from msrestazure.azure_exceptions import CloudError
-
-from .image import AzureCloudImage
-from .instance import AzureCloudInstance
-from ..platforms import Platform
-
-from cloudinit import util as c_util
-
-
-class AzureCloudPlatform(Platform):
- """Azure Cloud test platforms."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, config):
- """Set up platform."""
- super(AzureCloudPlatform, self).__init__(config)
- self.tag = '%s-%s' % (
- config['tag'], datetime.now().strftime('%Y%m%d%H%M%S'))
- self.storage_sku = config['storage_sku']
- self.vm_size = config['vm_size']
- self.location = config['region']
-
- try:
- self.credentials, self.subscription_id = self._get_credentials()
-
- self.resource_client = ResourceManagementClient(
- self.credentials, self.subscription_id)
- self.compute_client = ComputeManagementClient(
- self.credentials, self.subscription_id)
- self.network_client = NetworkManagementClient(
- self.credentials, self.subscription_id)
- self.storage_client = StorageManagementClient(
- self.credentials, self.subscription_id)
-
- self.resource_group = self._create_resource_group()
- self.public_ip = self._create_public_ip_address()
- self.storage = self._create_storage_account(config)
- self.vnet = self._create_vnet()
- self.subnet = self._create_subnet()
- self.nic = self._create_nic()
- except CloudError as e:
- raise RuntimeError(
- 'failed creating a resource:\n{}'.format(
- traceback.format_exc()
- )
- ) from e
-
- def create_instance(self, properties, config, features,
- image_id, user_data=None):
- """Create an instance
-
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_id: string of image id
- @param user_data: test user-data to pass to instance
- @return_value: cloud_tests.instances instance
- """
- if user_data is not None:
- user_data = str(base64.b64encode(
- user_data.encode('utf-8')), 'utf-8')
-
- return AzureCloudInstance(self, properties, config, features,
- image_id, user_data)
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- ss_region = self.azure_location_to_simplestreams_region()
-
- filters = [
- 'arch=%s' % 'amd64',
- 'endpoint=https://management.core.windows.net/',
- 'region=%s' % ss_region,
- 'release=%s' % img_conf['release']
- ]
-
- LOG.debug('finding image using streams')
- image = self._query_streams(img_conf, filters)
-
- try:
- image_id = image['id']
- LOG.debug('found image: %s', image_id)
- if image_id.find('__') > 0:
- image_id = image_id.split('__')[1]
- LOG.debug('image_id shortened to %s', image_id)
- except KeyError as e:
- raise RuntimeError(
- 'no images found for %s' % img_conf['release']
- ) from e
-
- return AzureCloudImage(self, img_conf, image_id)
-
- def destroy(self):
- """Delete all resources in resource group."""
- LOG.debug("Deleting resource group: %s", self.resource_group.name)
- delete = self.resource_client.resource_groups.delete(
- self.resource_group.name)
- delete.wait()
-
- def azure_location_to_simplestreams_region(self):
- """Convert location to simplestreams region"""
- location = self.location.lower().replace(' ', '')
- LOG.debug('finding location %s using simple streams', location)
- regions_file = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'regions.json')
- region_simplestreams_map = c_util.load_json(
- c_util.load_file(regions_file))
- return region_simplestreams_map.get(location, location)
-
- def _get_credentials(self):
- """Get credentials from environment"""
- LOG.debug('getting credentials from environment')
- cred_file = os.path.expanduser('~/.azure/credentials.json')
- try:
- azure_creds = c_util.load_json(
- c_util.load_file(cred_file))
- subscription_id = azure_creds['subscriptionId']
- credentials = ServicePrincipalCredentials(
- client_id=azure_creds['clientId'],
- secret=azure_creds['clientSecret'],
- tenant=azure_creds['tenantId'])
- return credentials, subscription_id
- except KeyError as e:
- raise RuntimeError(
- 'Please configure Azure service principal'
- ' credentials in %s' % cred_file
- ) from e
-
- def _create_resource_group(self):
- """Create resource group"""
- LOG.debug('creating resource group')
- resource_group_name = self.tag
- resource_group_params = {
- 'location': self.location
- }
- resource_group = self.resource_client.resource_groups.create_or_update(
- resource_group_name, resource_group_params)
- return resource_group
-
- def _create_storage_account(self, config):
- LOG.debug('creating storage account')
- storage_account_name = 'storage%s' % datetime.now().\
- strftime('%Y%m%d%H%M%S')
- storage_params = {
- 'sku': {
- 'name': config['storage_sku']
- },
- 'kind': "Storage",
- 'location': self.location
- }
- storage_account = self.storage_client.storage_accounts.create(
- self.resource_group.name, storage_account_name, storage_params)
- return storage_account.result()
-
- def _create_public_ip_address(self):
- """Create public ip address"""
- LOG.debug('creating public ip address')
- public_ip_name = '%s-ip' % self.resource_group.name
- public_ip_params = {
- 'location': self.location,
- 'public_ip_allocation_method': 'Dynamic'
- }
- ip = self.network_client.public_ip_addresses.create_or_update(
- self.resource_group.name, public_ip_name, public_ip_params)
- return ip.result()
-
- def _create_vnet(self):
- """create virtual network"""
- LOG.debug('creating vnet')
- vnet_name = '%s-vnet' % self.resource_group.name
- vnet_params = {
- 'location': self.location,
- 'address_space': {
- 'address_prefixes': ['10.0.0.0/16']
- }
- }
- vnet = self.network_client.virtual_networks.create_or_update(
- self.resource_group.name, vnet_name, vnet_params)
- return vnet.result()
-
- def _create_subnet(self):
- """create sub-network"""
- LOG.debug('creating subnet')
- subnet_name = '%s-subnet' % self.resource_group.name
- subnet_params = {
- 'address_prefix': '10.0.0.0/24'
- }
- subnet = self.network_client.subnets.create_or_update(
- self.resource_group.name, self.vnet.name,
- subnet_name, subnet_params)
- return subnet.result()
-
- def _create_nic(self):
- """Create network interface controller"""
- LOG.debug('creating nic')
- nic_name = '%s-nic' % self.resource_group.name
- nic_params = {
- 'location': self.location,
- 'ip_configurations': [{
- 'name': 'ipconfig',
- 'subnet': {
- 'id': self.subnet.id
- },
- 'publicIpAddress': {
- 'id': "/subscriptions/%s"
- "/resourceGroups/%s/providers/Microsoft.Network"
- "/publicIPAddresses/%s" % (
- self.subscription_id, self.resource_group.name,
- self.public_ip.name),
- }
- }]
- }
- nic = self.network_client.network_interfaces.create_or_update(
- self.resource_group.name, nic_name, nic_params)
- return nic.result()
diff --git a/tests/cloud_tests/platforms/azurecloud/regions.json b/tests/cloud_tests/platforms/azurecloud/regions.json
deleted file mode 100644
index c1b4da20..00000000
--- a/tests/cloud_tests/platforms/azurecloud/regions.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "eastasia": "East Asia",
- "southeastasia": "Southeast Asia",
- "centralus": "Central US",
- "eastus": "East US",
- "eastus2": "East US 2",
- "westus": "West US",
- "northcentralus": "North Central US",
- "southcentralus": "South Central US",
- "northeurope": "North Europe",
- "westeurope": "West Europe",
- "japanwest": "Japan West",
- "japaneast": "Japan East",
- "brazilsouth": "Brazil South",
- "australiaeast": "Australia East",
- "australiasoutheast": "Australia Southeast",
- "southindia": "South India",
- "centralindia": "Central India",
- "westindia": "West India",
- "canadacentral": "Canada Central",
- "canadaeast": "Canada East",
- "uksouth": "UK South",
- "ukwest": "UK West",
- "westcentralus": "West Central US",
- "westus2": "West US 2",
- "koreacentral": "Korea Central",
- "koreasouth": "Korea South",
- "francecentral": "France Central",
- "francesouth": "France South",
- "australiacentral": "Australia Central",
- "australiacentral2": "Australia Central 2",
- "uaecentral": "UAE Central",
- "uaenorth": "UAE North",
- "southafricanorth": "South Africa North",
- "southafricawest": "South Africa West",
- "switzerlandnorth": "Switzerland North",
- "switzerlandwest": "Switzerland West",
- "germanynorth": "Germany North",
- "germanywestcentral": "Germany West Central",
- "norwaywest": "Norway West",
- "norwayeast": "Norway East"
-}
diff --git a/tests/cloud_tests/platforms/azurecloud/snapshot.py b/tests/cloud_tests/platforms/azurecloud/snapshot.py
deleted file mode 100644
index 580cc596..00000000
--- a/tests/cloud_tests/platforms/azurecloud/snapshot.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud snapshot."""
-
-from ..snapshots import Snapshot
-
-from tests.cloud_tests import LOG
-
-
-class AzureCloudSnapshot(Snapshot):
- """Azure Cloud image copy backed snapshot."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, properties, config, features, image_id,
- delete_on_destroy=True):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- super(AzureCloudSnapshot, self).__init__(
- platform, properties, config, features)
-
- self.image_id = image_id
- self.delete_on_destroy = delete_on_destroy
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
- @param meta_data: meta_data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- if meta_data is not None:
- raise ValueError("metadata not supported on Azure Cloud tests")
-
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_id, user_data)
-
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- LOG.debug('destroying image %s', self.image_id)
- if self.delete_on_destroy:
- self.platform.compute_client.images.delete(
- self.platform.resource_group.name,
- self.image_id)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/__init__.py b/tests/cloud_tests/platforms/ec2/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/cloud_tests/platforms/ec2/__init__.py
+++ /dev/null
diff --git a/tests/cloud_tests/platforms/ec2/image.py b/tests/cloud_tests/platforms/ec2/image.py
deleted file mode 100644
index d7b2c908..00000000
--- a/tests/cloud_tests/platforms/ec2/image.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""EC2 Image Base Class."""
-
-from ..images import Image
-from .snapshot import EC2Snapshot
-
-from tests.cloud_tests import LOG
-
-
-class EC2Image(Image):
- """EC2 backed image."""
-
- platform_name = 'ec2'
-
- def __init__(self, platform, config, image_ami):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- @param image_ami: string of image ami ID
- """
- super(EC2Image, self).__init__(platform, config)
- self._img_instance = None
- self.image_ami = image_ami
-
- @property
- def _instance(self):
- """Internal use only, returns a running instance"""
- if not self._img_instance:
- self._img_instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_ami, user_data=None)
- self._img_instance.start(wait=True, wait_for_cloud_init=True)
- return self._img_instance
-
- def destroy(self):
- """Delete the instance used to create a custom image."""
- if self._img_instance:
- LOG.debug('terminating backing instance %s',
- self._img_instance.instance.instance_id)
- self._img_instance.instance.terminate()
- self._img_instance.instance.wait_until_terminated()
-
- super(EC2Image, self).destroy()
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- self._instance.start(wait=True)
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- self._instance.start(wait=True)
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- self._instance.start(wait=True)
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """Create snapshot of image, block until done.
-
- Will return base image_ami if no instance has been booted, otherwise
- will run the clean script, shutdown the instance, create a custom
- AMI, and use that AMI once available.
- """
- if not self._img_instance:
- return EC2Snapshot(self.platform, self.properties, self.config,
- self.features, self.image_ami,
- delete_on_destroy=False)
-
- if self.config.get('boot_clean_script'):
- self._img_instance.run_script(self.config.get('boot_clean_script'))
-
- self._img_instance.shutdown(wait=True)
-
- LOG.debug('creating custom ami from instance %s',
- self._img_instance.instance.instance_id)
- response = self.platform.ec2_client.create_image(
- Name='%s-%s' % (self.platform.tag, self.image_ami),
- InstanceId=self._img_instance.instance.instance_id
- )
- image_ami_edited = response['ImageId']
-
- # Create image and wait until it is in the 'available' state
- image = self.platform.ec2_resource.Image(image_ami_edited)
- image.wait_until_exists()
- waiter = self.platform.ec2_client.get_waiter('image_available')
- waiter.wait(ImageIds=[image.id])
- image.reload()
-
- return EC2Snapshot(self.platform, self.properties, self.config,
- self.features, image_ami_edited)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py
deleted file mode 100644
index d2e84047..00000000
--- a/tests/cloud_tests/platforms/ec2/instance.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 instance."""
-import os
-
-import botocore
-
-from ..instances import Instance
-from tests.cloud_tests import LOG, util
-
-
-class EC2Instance(Instance):
- """EC2 backed instance."""
-
- platform_name = "ec2"
- _ssh_client = None
-
- def __init__(self, platform, properties, config, features,
- image_ami, user_data=None):
- """Set up instance.
-
- @param platform: platform object
- @param properties: dictionary of properties
- @param config: dictionary of configuration values
- @param features: dictionary of supported feature flags
- @param image_ami: AWS AMI ID for image to use
- @param user_data: test user-data to pass to instance
- """
- super(EC2Instance, self).__init__(
- platform, image_ami, properties, config, features)
-
- self.image_ami = image_ami
- self.instance = None
- self.user_data = user_data
- self.ssh_ip = None
- self.ssh_port = 22
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
-
- def console_log(self):
- """Collect console log from instance.
-
- The console log is buffered and not always present, therefore
- may return empty string.
- """
- try:
- # OutputBytes comes from platform._decode_console_output_as_bytes
- response = self.instance.console_output()
- return response['OutputBytes']
- except KeyError as e:
- if 'Output' in response:
- msg = ("'OutputBytes' did not exist in console_output() but "
- "'Output' did: %s..." % response['Output'][0:128])
- raise util.PlatformError('console_log', msg) from e
- return ('No Console Output [%s]' % self.instance).encode()
-
- def destroy(self):
- """Clean up instance."""
- if self.instance:
- LOG.debug('destroying instance %s', self.instance.id)
- self.instance.terminate()
- self.instance.wait_until_terminated()
-
- self._ssh_close()
-
- super(EC2Instance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command on instance."""
- env_args = []
- if env:
- env_args = ['env'] + ["%s=%s" for k, v in env.items()]
-
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance on EC2 with the platfrom's VPC."""
- if self.instance:
- if self.instance.state['Name'] == 'running':
- return
-
- LOG.debug('starting instance %s', self.instance.id)
- self.instance.start()
- else:
- LOG.debug('launching instance')
-
- args = {
- 'ImageId': self.image_ami,
- 'InstanceType': self.platform.instance_type,
- 'KeyName': self.platform.key_name,
- 'MaxCount': 1,
- 'MinCount': 1,
- 'SecurityGroupIds': [self.platform.security_group.id],
- 'SubnetId': self.platform.subnet.id,
- 'TagSpecifications': [{
- 'ResourceType': 'instance',
- 'Tags': [{
- 'Key': 'Name', 'Value': self.platform.tag
- }]
- }],
- }
-
- if self.user_data:
- args['UserData'] = self.user_data
-
- try:
- instances = self.platform.ec2_resource.create_instances(**args)
- except botocore.exceptions.ClientError as error:
- error_msg = error.response['Error']['Message']
- raise util.PlatformError('start', error_msg) from error
-
- self.instance = instances[0]
-
- LOG.debug('instance id: %s', self.instance.id)
- if wait:
- self.instance.wait_until_running()
- self.instance.reload()
- self.ssh_ip = self.instance.public_ip_address
- self._wait_for_system(wait_for_cloud_init)
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
- LOG.debug('stopping instance %s', self.instance.id)
- self.instance.stop()
-
- if wait:
- self.instance.wait_until_stopped()
- self.instance.reload()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py
deleted file mode 100644
index b61a2ffb..00000000
--- a/tests/cloud_tests/platforms/ec2/platform.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 platform."""
-from datetime import datetime
-import os
-
-import boto3
-import botocore
-from botocore import session, handlers
-import base64
-
-from ..platforms import Platform
-from .image import EC2Image
-from .instance import EC2Instance
-from tests.cloud_tests import LOG
-
-
-class EC2Platform(Platform):
- """EC2 test platform."""
-
- platform_name = 'ec2'
- ipv4_cidr = '192.168.1.0/20'
-
- def __init__(self, config):
- """Set up platform."""
- super(EC2Platform, self).__init__(config)
- # Used for unique VPC, SSH key, and custom AMI generation naming
- self.tag = '%s-%s' % (
- config['tag'], datetime.now().strftime('%Y%m%d%H%M%S'))
- self.instance_type = config['instance-type']
-
- try:
- b3session = get_session()
- self.ec2_client = b3session.client('ec2')
- self.ec2_resource = b3session.resource('ec2')
- self.ec2_region = b3session.region_name
- self.key_name = self._upload_public_key(config)
- except botocore.exceptions.NoRegionError as e:
- raise RuntimeError(
- 'Please configure default region in $HOME/.aws/config'
- ) from e
- except botocore.exceptions.NoCredentialsError as e:
- raise RuntimeError(
- 'Please configure ec2 credentials in $HOME/.aws/credentials'
- ) from e
-
- self.vpc = self._create_vpc()
- self.internet_gateway = self._create_internet_gateway()
- self.subnet = self._create_subnet()
- self.routing_table = self._create_routing_table()
- self.security_group = self._create_security_group()
-
- def create_instance(self, properties, config, features,
- image_ami, user_data=None):
- """Create an instance
-
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_ami: string of image ami ID
- @param user_data: test user-data to pass to instance
- @return_value: cloud_tests.instances instance
- """
- return EC2Instance(self, properties, config, features,
- image_ami, user_data)
-
- def destroy(self):
- """Delete SSH keys, terminate all instances, and delete VPC."""
- for instance in self.vpc.instances.all():
- LOG.debug('waiting for instance %s termination', instance.id)
- instance.terminate()
- instance.wait_until_terminated()
-
- if self.key_name:
- LOG.debug('deleting SSH key %s', self.key_name)
- self.ec2_client.delete_key_pair(KeyName=self.key_name)
-
- if self.security_group:
- LOG.debug('deleting security group %s', self.security_group.id)
- self.security_group.delete()
-
- if self.subnet:
- LOG.debug('deleting subnet %s', self.subnet.id)
- self.subnet.delete()
-
- if self.routing_table:
- LOG.debug('deleting routing table %s', self.routing_table.id)
- self.routing_table.delete()
-
- if self.internet_gateway:
- LOG.debug('deleting internet gateway %s', self.internet_gateway.id)
- self.internet_gateway.detach_from_vpc(VpcId=self.vpc.id)
- self.internet_gateway.delete()
-
- if self.vpc:
- LOG.debug('deleting vpc %s', self.vpc.id)
- self.vpc.delete()
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- Hard coded for 'amd64' based images.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- if img_conf['root-store'] == 'ebs':
- root_store = 'ssd'
- elif img_conf['root-store'] == 'instance-store':
- root_store = 'instance'
- else:
- raise RuntimeError('Unknown root-store type: %s' %
- (img_conf['root-store']))
-
- filters = [
- 'arch=%s' % 'amd64',
- 'endpoint=https://ec2.%s.amazonaws.com' % self.ec2_region,
- 'region=%s' % self.ec2_region,
- 'release=%s' % img_conf['release'],
- 'root_store=%s' % root_store,
- 'virt=hvm',
- ]
-
- LOG.debug('finding image using streams')
- image = self._query_streams(img_conf, filters)
-
- try:
- image_ami = image['id']
- except KeyError as e:
- raise RuntimeError(
- 'No images found for %s!' % img_conf['release']
- ) from e
-
- LOG.debug('found image: %s', image_ami)
- image = EC2Image(self, img_conf, image_ami)
- return image
-
- def _create_internet_gateway(self):
- """Create Internet Gateway and assign to VPC."""
- LOG.debug('creating internet gateway')
- # pylint: disable=no-member
- internet_gateway = self.ec2_resource.create_internet_gateway()
- internet_gateway.attach_to_vpc(VpcId=self.vpc.id)
- self._tag_resource(internet_gateway)
-
- return internet_gateway
-
- def _create_routing_table(self):
- """Update default routing table with internet gateway.
-
- This sets up internet access between the VPC via the internet gateway
- by configuring routing tables for IPv4 and IPv6.
- """
- LOG.debug('creating routing table')
- route_table = self.vpc.create_route_table()
- route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
- GatewayId=self.internet_gateway.id)
- route_table.create_route(DestinationIpv6CidrBlock='::/0',
- GatewayId=self.internet_gateway.id)
- route_table.associate_with_subnet(SubnetId=self.subnet.id)
- self._tag_resource(route_table)
-
- return route_table
-
- def _create_security_group(self):
- """Enables ingress to default VPC security group."""
- LOG.debug('creating security group')
- security_group = self.vpc.create_security_group(
- GroupName=self.tag, Description='integration test security group')
- security_group.authorize_ingress(
- IpProtocol='-1', FromPort=-1, ToPort=-1, CidrIp='0.0.0.0/0')
- self._tag_resource(security_group)
-
- return security_group
-
- def _create_subnet(self):
- """Generate IPv4 and IPv6 subnets for use."""
- ipv6_cidr = self.vpc.ipv6_cidr_block_association_set[0][
- 'Ipv6CidrBlock'][:-2] + '64'
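- # e.g. an Amazon-provided VPC block of '2600:1f16:abc:de00::/56'
- # becomes subnet '2600:1f16:abc:de00::/64' (address illustrative).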
-
- LOG.debug('creating subnet with following ranges:')
- LOG.debug('ipv4: %s', self.ipv4_cidr)
- LOG.debug('ipv6: %s', ipv6_cidr)
- subnet = self.vpc.create_subnet(CidrBlock=self.ipv4_cidr,
- Ipv6CidrBlock=ipv6_cidr)
- modify_subnet = subnet.meta.client.modify_subnet_attribute
- modify_subnet(SubnetId=subnet.id,
- MapPublicIpOnLaunch={'Value': True})
- self._tag_resource(subnet)
-
- return subnet
-
- def _create_vpc(self):
- """Setup AWS EC2 VPC or return existing VPC."""
- LOG.debug('creating new vpc')
- try:
- vpc = self.ec2_resource.create_vpc( # pylint: disable=no-member
- CidrBlock=self.ipv4_cidr,
- AmazonProvidedIpv6CidrBlock=True)
- except botocore.exceptions.ClientError as e:
- raise RuntimeError(e) from e
-
- vpc.wait_until_available()
- self._tag_resource(vpc)
-
- return vpc
-
- def _tag_resource(self, resource):
- """Tag a resource with the specified tag.
-
- This makes finding and deleting resources specific to this testing
- much easier to find.
-
- @param resource: resource to tag
- """
- tag = {
- 'Key': 'Name',
- 'Value': self.tag
- }
- resource.create_tags(Tags=[tag])
-
- def _upload_public_key(self, config):
- """Generate random name and upload SSH key with that name.
-
- @param config: platform config
- @return: string of ssh key name
- """
- key_file = os.path.join(config['data_dir'], config['public_key'])
- with open(key_file, 'r') as file:
- public_key = file.read().strip('\n')
-
- LOG.debug('uploading SSH key %s', self.tag)
- self.ec2_client.import_key_pair(KeyName=self.tag,
- PublicKeyMaterial=public_key)
-
- return self.tag
-
-
-def _decode_console_output_as_bytes(parsed, **kwargs):
- """Provide console output as bytes in OutputBytes.
-
- For this to be useful, the session has to have had the
- decode_console_output handler unregistered already.
-
- https://github.com/boto/botocore/issues/1351 ."""
- if 'Output' not in parsed:
- return
- orig = parsed['Output']
- handlers.decode_console_output(parsed, **kwargs)
- parsed['OutputBytes'] = base64.b64decode(orig)
-
-
-def get_session():
- mysess = session.get_session()
- mysess.unregister('after-call.ec2.GetConsoleOutput',
- handlers.decode_console_output)
- mysess.register('after-call.ec2.GetConsoleOutput',
- _decode_console_output_as_bytes)
- return boto3.Session(botocore_session=mysess)
-
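-# Usage sketch (illustrative instance id): clients built from this session
-# expose console output as bytes under 'OutputBytes':
-#   b3session = get_session()
-#   ec2 = b3session.client('ec2')
-#   out = ec2.get_console_output(InstanceId='i-0123456789abcdef0')
-#   raw = out.get('OutputBytes')  # base64-decoded console bytes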
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/snapshot.py b/tests/cloud_tests/platforms/ec2/snapshot.py
deleted file mode 100644
index 2c48cb54..00000000
--- a/tests/cloud_tests/platforms/ec2/snapshot.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 snapshot."""
-
-from ..snapshots import Snapshot
-from tests.cloud_tests import LOG
-
-
-class EC2Snapshot(Snapshot):
- """EC2 image copy backed snapshot."""
-
- platform_name = 'ec2'
-
- def __init__(self, platform, properties, config, features, image_ami,
- delete_on_destroy=True):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- @param image_ami: string of image ami ID
- @param delete_on_destroy: boolean to delete on destroy
- """
- super(EC2Snapshot, self).__init__(
- platform, properties, config, features)
-
- self.image_ami = image_ami
- self.delete_on_destroy = delete_on_destroy
-
- def destroy(self):
- """Deregister the backing AMI."""
- if self.delete_on_destroy:
- image = self.platform.ec2_resource.Image(self.image_ami)
- snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
-
- LOG.debug('removing custom ami %s', self.image_ami)
- self.platform.ec2_client.deregister_image(ImageId=self.image_ami)
-
- LOG.debug('removing custom snapshot %s', snapshot_id)
- self.platform.ec2_client.delete_snapshot(SnapshotId=snapshot_id)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
- @param meta_data: meta_data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: string of test name
- @return_value: an Instance
- """
- if meta_data is not None:
- raise ValueError("metadata not supported on Ec2")
-
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_ami, user_data)
-
- if start:
- instance.start()
-
- return instance
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/images.py b/tests/cloud_tests/platforms/images.py
deleted file mode 100644
index f047de2e..00000000
--- a/tests/cloud_tests/platforms/images.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base class for images."""
-
-from ..util import TargetBase
-
-
-class Image(TargetBase):
- """Base class for images."""
-
- platform_name = None
-
- def __init__(self, platform, config):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- """
- self.platform = platform
- self.config = config
-
- def __str__(self):
- """A brief description of the image."""
- return '-'.join((self.properties['os'], self.properties['release']))
-
- @property
- def properties(self):
- """{} containing: 'arch', 'os', 'version', 'release'."""
- return {k: self.config[k]
- for k in ('arch', 'os', 'release', 'version')}
-
- @property
- def features(self):
- """Feature flags supported by this image.
-
- @return_value: list of feature names
- """
- return [k for k, v in self.config.get('features', {}).items() if v]
-
- @property
- def setup_overrides(self):
- """Setup options that need to be overridden for the image.
-
- @return_value: dictionary to update args with
- """
- # NOTE: more sophisticated options may be required at some point
- return self.config.get('setup_overrides', {})
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
- raise NotImplementedError
-
- def destroy(self):
- """Clean up data associated with image."""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py
deleted file mode 100644
index efc35c7f..00000000
--- a/tests/cloud_tests/platforms/instances.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base instance."""
-import time
-
-import paramiko
-from paramiko.ssh_exception import (
- BadHostKeyException, AuthenticationException, SSHException)
-
-from ..util import TargetBase
-from tests.cloud_tests import LOG, util
-
-
-class Instance(TargetBase):
- """Base instance object."""
-
- platform_name = None
- _ssh_client = None
-
- def __init__(self, platform, name, properties, config, features):
- """Set up instance.
-
- @param platform: platform object
- @param name: hostname of instance
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- self.platform = platform
- self.name = name
- self.properties = properties
- self.config = config
- self.features = features
- self._tmp_count = 0
-
- self.ssh_ip = None
- self.ssh_port = None
- self.ssh_key_file = None
- self.ssh_username = 'ubuntu'
-
- def console_log(self):
- """Instance console.
-
- @return_value: bytes of this instance’s console
- """
- raise NotImplementedError
-
- def reboot(self, wait=True):
- """Reboot instance."""
- raise NotImplementedError
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
- raise NotImplementedError
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- raise NotImplementedError
-
- def destroy(self):
- """Clean up instance."""
- self._ssh_close()
-
- def _ssh(self, command, stdin=None):
- """Run a command via SSH."""
- client = self._ssh_connect()
-
- cmd = util.shell_pack(command)
- fp_in, fp_out, fp_err = client.exec_command(cmd)
- channel = fp_in.channel
-
- if stdin is not None:
- fp_in.write(stdin)
- fp_in.close()
-
- channel.shutdown_write()
- rc = channel.recv_exit_status()
-
- return (fp_out.read(), fp_err.read(), rc)
-
- def _ssh_close(self):
- if self._ssh_client:
- try:
- self._ssh_client.close()
- except SSHException:
- LOG.warning('Failed to close SSH connection.')
- self._ssh_client = None
-
- def _ssh_connect(self):
- """Connect via SSH.
-
- Attempt to SSH to the client on the specific IP and port. If it
- fails in some manner, then retry 2 more times for a total of 3
- attempts; sleeping a few seconds between attempts.
- """
- if self._ssh_client:
- return self._ssh_client
-
- if not self.ssh_ip or not self.ssh_port:
- raise ValueError("Cannot ssh_connect, ssh_ip=%s ssh_port=%s" %
- (self.ssh_ip, self.ssh_port))
-
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
-
- retries = 3
- while retries:
- try:
- client.connect(username=self.ssh_username,
- hostname=self.ssh_ip, port=self.ssh_port,
- pkey=private_key)
- self._ssh_client = client
- return client
- except (ConnectionRefusedError, AuthenticationException,
- BadHostKeyException, ConnectionResetError, SSHException,
- OSError):
- retries -= 1
- LOG.debug('Retrying ssh connection on connect failure')
- time.sleep(3)
-
- ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % (
- self.ssh_username, self.ssh_ip, self.ssh_port
- )
- raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')
-
- def _wait_for_system(self, wait_for_cloud_init):
- """Wait until system has fully booted and cloud-init has finished.
-
-        @param wait_for_cloud_init: also wait for cloud-init to complete
-        @return_value: None, raises PlatformError if boot_timeout exceeded
- """
- def clean_test(test):
- """Clean formatting for system ready test testcase."""
- return ' '.join(line for line in test.strip().splitlines()
- if not line.lstrip().startswith('#'))
-
- boot_timeout = self.config['boot_timeout']
- tests = [self.config['system_ready_script']]
- if wait_for_cloud_init:
- tests.append(self.config['cloud_init_ready_script'])
-
- formatted_tests = ' && '.join(clean_test(t) for t in tests)
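-        # Build a shell loop that polls the readiness test(s) once per
-        # second inside the instance, exiting 0 on success or 1 after
-        # boot_timeout iterations.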
- cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
- 'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
- test=formatted_tests)
-
- end_time = time.time() + boot_timeout
- while True:
- try:
- return_code = self.execute(
- cmd, rcs=(0, 1), description='wait for instance start'
- )[-1]
- if return_code == 0:
- break
- except util.InTargetExecuteError:
- LOG.warning("failed to connect via SSH")
-
- if time.time() < end_time:
- time.sleep(3)
- else:
- raise util.PlatformError('ssh', 'after %ss instance is not '
- 'reachable' % boot_timeout)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/__init__.py b/tests/cloud_tests/platforms/lxd/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/cloud_tests/platforms/lxd/__init__.py
+++ /dev/null
diff --git a/tests/cloud_tests/platforms/lxd/image.py b/tests/cloud_tests/platforms/lxd/image.py
deleted file mode 100644
index a88b47f3..00000000
--- a/tests/cloud_tests/platforms/lxd/image.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""LXD Image Base Class."""
-
-import os
-import shutil
-import tempfile
-
-from ..images import Image
-from .snapshot import LXDSnapshot
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import util
-
-
-class LXDImage(Image):
- """LXD backed image."""
-
- platform_name = "lxd"
-
- def __init__(self, platform, config, pylxd_image):
- """Set up image.
-
- @param platform: platform object
-        @param config: image configuration
-        @param pylxd_image: underlying pylxd image object
-        """
- self.modified = False
- self._img_instance = None
- self._pylxd_image = None
- self.pylxd_image = pylxd_image
- super(LXDImage, self).__init__(platform, config)
-
- @property
- def pylxd_image(self):
- """Property function."""
- if self._pylxd_image:
- self._pylxd_image.sync()
- return self._pylxd_image
-
- @pylxd_image.setter
- def pylxd_image(self, pylxd_image):
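-        # Tear down any image-modification instance, and delete the old
-        # pylxd image unless it is an unmodified base image being cached.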
-        if self._img_instance:
-            self._img_instance.destroy()
-            self._img_instance = None
- if (self._pylxd_image and
- (self._pylxd_image is not pylxd_image) and
- (not self.config.get('cache_base_image') or self.modified)):
- self._pylxd_image.delete(wait=True)
- self.modified = False
- self._pylxd_image = pylxd_image
-
- @property
- def _instance(self):
- """Internal use only, returns a instance
-
- This starts an lxc instance from the image, so it is "dirty".
- Better would be some way to modify this "at rest".
- lxc-pstart would be an option."""
- if not self._img_instance:
- self._img_instance = self.platform.launch_container(
- self.properties, self.config, self.features,
- use_desc='image-modification', image_desc=str(self),
- image=self.pylxd_image.fingerprint)
- self._img_instance.start()
- return self._img_instance
-
- @property
- def properties(self):
- """{} containing: 'arch', 'os', 'version', 'release'."""
- properties = self.pylxd_image.properties
- return {
- 'arch': properties.get('architecture'),
- 'os': properties.get('os'),
- 'version': properties.get('version'),
- 'release': properties.get('release'),
- }
-
- def export_image(self, output_dir):
- """Export image from lxd image store to disk.
-
- @param output_dir: dir to store the exported image in
- @return_value: tuple of path to metadata tarball and rootfs
-
- Only the "split" image format with separate rootfs and metadata
- files is supported, e.g:
-
- 71f171df[...]cd31.squashfs (could also be: .tar.xz or .tar.gz)
- meta-71f171df[...]cd31.tar.xz
-
- Combined images made by a single tarball are not supported.
- """
- # pylxd's image export feature doesn't do split exports, so use cmdline
- fp = self.pylxd_image.fingerprint
- subp.subp(['lxc', 'image', 'export', fp, output_dir], capture=True)
- image_files = [p for p in os.listdir(output_dir) if fp in p]
-
- if len(image_files) != 2:
- raise NotImplementedError(
- "Image %s has unsupported format. "
- "Expected 2 files, found %d: %s."
- % (fp, len(image_files), ', '.join(image_files)))
-
- metadata = os.path.join(
- output_dir,
- next(p for p in image_files if p.startswith('meta-')))
- rootfs = os.path.join(
- output_dir,
- next(p for p in image_files if not p.startswith('meta-')))
- return (metadata, rootfs)
-
- def import_image(self, metadata, rootfs):
- """Import image to lxd image store from (split) tarball on disk.
-
- Note, this will replace and delete the current pylxd_image
-
- @param metadata: metadata tarball
- @param rootfs: rootfs tarball
- @return_value: imported image fingerprint
- """
- alias = util.gen_instance_name(
- image_desc=str(self), use_desc='update-metadata')
- subp.subp(['lxc', 'image', 'import', metadata, rootfs,
- '--alias', alias], capture=True)
- self.pylxd_image = self.platform.query_image_by_alias(alias)
- return self.pylxd_image.fingerprint
-
- def update_templates(self, template_config, template_data):
- """Update the image's template configuration.
-
- Note, this will replace and delete the current pylxd_image
-
- @param template_config: config overrides for template metadata
- @param template_data: template data to place into templates/
- """
- # set up tmp files
- export_dir = tempfile.mkdtemp(prefix='cloud_test_util_')
- extract_dir = tempfile.mkdtemp(prefix='cloud_test_util_')
- new_metadata = os.path.join(export_dir, 'new-meta.tar.xz')
- metadata_yaml = os.path.join(extract_dir, 'metadata.yaml')
- template_dir = os.path.join(extract_dir, 'templates')
-
- try:
- # extract old data
- (metadata, rootfs) = self.export_image(export_dir)
- shutil.unpack_archive(metadata, extract_dir)
-
- # update metadata
- metadata = c_util.read_conf(metadata_yaml)
- templates = metadata.get('templates', {})
- templates.update(template_config)
- metadata['templates'] = templates
- util.yaml_dump(metadata, metadata_yaml)
-
- # write out template files
- for name, content in template_data.items():
- path = os.path.join(template_dir, name)
- c_util.write_file(path, content)
-
- # store new data, mark new image as modified
- util.flat_tar(new_metadata, extract_dir)
- self.import_image(new_metadata, rootfs)
- self.modified = True
-
- finally:
- # remove tmpfiles
- shutil.rmtree(export_dir)
- shutil.rmtree(extract_dir)
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
- # get empty user data to pass in to instance
- # if overrides for user data provided, use them
- empty_userdata = util.update_user_data(
- {}, self.config.get('user_data_overrides', {}))
- conf = {'user.user-data': empty_userdata}
- # clone current instance
- instance = self.platform.launch_container(
- self.properties, self.config, self.features,
- container=self._instance.name, image_desc=str(self),
- use_desc='snapshot', container_config=conf)
- # wait for cloud-init before boot_clean_script is run to ensure
- # /var/lib/cloud is removed cleanly
- instance.start(wait=True, wait_for_cloud_init=True)
- if self.config.get('boot_clean_script'):
- instance.run_script(self.config.get('boot_clean_script'))
- # freeze current instance and return snapshot
- instance.freeze()
- return LXDSnapshot(self.platform, self.properties, self.config,
- self.features, instance)
-
- def destroy(self):
- """Clean up data associated with image."""
- self.pylxd_image = None
- super(LXDImage, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py
deleted file mode 100644
index 2b973a08..00000000
--- a/tests/cloud_tests/platforms/lxd/instance.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD instance."""
-
-import os
-import shutil
-import time
-from tempfile import mkdtemp
-
-from cloudinit.subp import subp, ProcessExecutionError, which
-from cloudinit.util import load_yaml
-from tests.cloud_tests import LOG
-from tests.cloud_tests.util import PlatformError
-
-from ..instances import Instance
-
-from pylxd import exceptions as pylxd_exc
-
-
-class LXDInstance(Instance):
- """LXD container backed instance."""
-
- platform_name = "lxd"
- _console_log_method = None
- _console_log_file = None
-
- def __init__(self, platform, name, properties, config, features,
- pylxd_container):
- """Set up instance.
-
- @param platform: platform object
- @param name: hostname of instance
- @param properties: image properties
- @param config: image config
-        @param features: supported feature flags
-        @param pylxd_container: underlying pylxd container object
-        """
- if not pylxd_container:
- raise ValueError("Invalid value pylxd_container: %s" %
- pylxd_container)
- self._pylxd_container = pylxd_container
- super(LXDInstance, self).__init__(
- platform, name, properties, config, features)
- self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name))
- self.name = name
- self._setup_console_log()
-
- @property
- def pylxd_container(self):
- """Property function."""
- if self._pylxd_container is None:
- raise RuntimeError(
- "%s: Attempted use of pylxd_container after deletion." % self)
- self._pylxd_container.sync()
- return self._pylxd_container
-
- def __str__(self):
- return (
- '%s(name=%s) status=%s' %
- (self.__class__.__name__, self.name,
- ("deleted" if self._pylxd_container is None else
- self.pylxd_container.status)))
-
- def _execute(self, command, stdin=None, env=None):
- if env is None:
- env = {}
-
-        env_args = []
-        if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- # ensure instance is running and execute the command
- self.start()
-
- # Use cmdline client due to https://github.com/lxc/pylxd/issues/268
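-        # e.g. command=['cloud-init', 'status'] with env={'FOO': 'bar'}
-        # runs: lxc exec <name> -- env FOO=bar cloud-init status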
- exit_code = 0
- try:
- stdout, stderr = subp(
- ['lxc', 'exec', self.name, '--'] + env_args + list(command),
- data=stdin, decode=False)
- except ProcessExecutionError as e:
- exit_code = e.exit_code
- stdout = e.stdout
- stderr = e.stderr
-
- return stdout, stderr, exit_code
-
- def read_data(self, remote_path, decode=False):
- """Read data from instance filesystem.
-
- @param remote_path: path in instance
- @param decode: decode data before returning.
- @return_value: content of remote_path as bytes if 'decode' is False,
- and as string if 'decode' is True.
- """
- data = self.pylxd_container.files.get(remote_path)
- return data.decode() if decode else data
-
- def write_data(self, remote_path, data):
- """Write data to instance filesystem.
-
- @param remote_path: path in instance
- @param data: data to write in bytes
- """
- self.pylxd_container.files.put(remote_path, data)
-
- @property
- def console_log_method(self):
- if self._console_log_method is not None:
- return self._console_log_method
-
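-        # Prefer the server's native 'lxc console --show-log' when
-        # supported; otherwise fall back to a raw.lxc console logfile
-        # (a snap-confined lxc client can only write under
-        # /var/snap/lxd/common).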
- client = which('lxc')
- if not client:
- raise PlatformError("No 'lxc' client.")
-
- elif _has_proper_console_support():
- self._console_log_method = 'show-log'
- elif client.startswith("/snap"):
- self._console_log_method = 'logfile-snap'
- else:
- self._console_log_method = 'logfile-tmp'
-
- LOG.debug("Set console log method to %s", self._console_log_method)
- return self._console_log_method
-
- def _setup_console_log(self):
- method = self.console_log_method
- if not method.startswith("logfile-"):
- return
-
- if method == "logfile-snap":
- log_dir = "/var/snap/lxd/common/consoles"
- if not os.path.exists(log_dir):
- raise PlatformError(
- "Unable to log with snap lxc. Please run:\n"
- " sudo mkdir --mode=1777 -p %s" % log_dir)
- elif method == "logfile-tmp":
- log_dir = "/tmp"
- else:
- raise PlatformError(
- "Unexpected value for console method: %s" % method)
-
- # doing this ensures we can read it. Otherwise it ends up root:root.
- log_file = os.path.join(log_dir, self.name)
- with open(log_file, "w") as fp:
- fp.write("# %s\n" % self.name)
-
- cfg = "lxc.console.logfile=%s" % log_file
- orig = self._pylxd_container.config.get('raw.lxc', "")
- if orig:
- orig += "\n"
- self._pylxd_container.config['raw.lxc'] = orig + cfg
- self._pylxd_container.save()
- self._console_log_file = log_file
-
- def console_log(self):
- """Console log.
-
- @return_value: bytes of this instance's console
- """
-
- if self._console_log_file:
- if not os.path.exists(self._console_log_file):
-                raise NotImplementedError(
-                    "Console log '%s' does not exist. If this is a remote "
-                    "lxc, then this is really NotImplementedError. If it is "
-                    "a local lxc, then this is a RuntimeError. "
-                    "https://github.com/lxc/lxd/issues/1129"
-                    % self._console_log_file)
- with open(self._console_log_file, "rb") as fp:
- return fp.read()
-
- try:
- return subp(['lxc', 'console', '--show-log', self.name],
- decode=False)[0]
- except ProcessExecutionError as e:
- raise PlatformError(
- "console log",
- "Console log failed [%d]: stdout=%s stderr=%s" % (
- e.exit_code, e.stdout, e.stderr)
- ) from e
-
- def reboot(self, wait=True):
- """Reboot instance."""
- self.shutdown(wait=wait)
- self.start(wait=wait)
-
- def shutdown(self, wait=True, retry=1):
- """Shutdown instance."""
- if self.pylxd_container.status == 'Stopped':
- return
-
- try:
- LOG.debug("%s: shutting down (wait=%s)", self, wait)
- self.pylxd_container.stop(wait=wait)
- except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
- # An exception happens here sometimes (LP: #1783198)
- # LOG it, and try again.
- LOG.warning(
- ("%s: shutdown(retry=%d) caught %s in shutdown "
- "(response=%s): %s"),
- self, retry, e.__class__.__name__, e.response, e)
- if isinstance(e, pylxd_exc.NotFound):
- LOG.debug("container_exists(%s) == %s",
- self.name, self.platform.container_exists(self.name))
- if retry == 0:
- raise e
- return self.shutdown(wait=wait, retry=retry - 1)
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- if self.pylxd_container.status != 'Running':
- self.pylxd_container.start(wait=wait)
- if wait:
- self._wait_for_system(wait_for_cloud_init)
-
- def freeze(self):
- """Freeze instance."""
- if self.pylxd_container.status != 'Frozen':
- self.pylxd_container.freeze(wait=True)
-
- def unfreeze(self):
- """Unfreeze instance."""
- if self.pylxd_container.status == 'Frozen':
- self.pylxd_container.unfreeze(wait=True)
-
- def destroy(self):
- """Clean up instance."""
- LOG.debug("%s: deleting container.", self)
- self.unfreeze()
- self.shutdown()
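-        # Deletion can transiently fail right after a stop; retry up to 5
-        # times, sleeping 1 second between attempts.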
- retries = [1] * 5
- for attempt, wait in enumerate(retries):
- try:
- self.pylxd_container.delete(wait=True)
- break
- except Exception:
- if attempt + 1 >= len(retries):
- raise
- LOG.debug('Failed to delete container %s (%s/%s) retrying...',
- self, attempt + 1, len(retries))
- time.sleep(wait)
-
- self._pylxd_container = None
-
- if self.platform.container_exists(self.name):
- raise OSError('%s: container was not properly removed' % self)
- if self._console_log_file and os.path.exists(self._console_log_file):
- os.unlink(self._console_log_file)
- shutil.rmtree(self.tmpd)
- super(LXDInstance, self).destroy()
-
-
-def _has_proper_console_support():
- stdout, _ = subp(['lxc', 'info'])
- info = load_yaml(stdout)
- reason = None
- if 'console' not in info.get('api_extensions', []):
- reason = "LXD server does not support console api extension"
- else:
- dver = str(info.get('environment', {}).get('driver_version', ""))
- if dver.startswith("2.") or dver.startswith("1."):
- reason = "LXD Driver version not 3.x+ (%s)" % dver
- else:
- try:
- stdout = subp(['lxc', 'console', '--help'], decode=False)[0]
- if not (b'console' in stdout and b'log' in stdout):
- reason = "no '--log' in lxc console --help"
- except ProcessExecutionError:
- reason = "no 'console' command in lxc client"
-
- if reason:
- LOG.debug("no console-support: %s", reason)
- return False
- else:
- LOG.debug("console-support looks good")
- return True
-
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/platform.py b/tests/cloud_tests/platforms/lxd/platform.py
deleted file mode 100644
index f7251a07..00000000
--- a/tests/cloud_tests/platforms/lxd/platform.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD platform."""
-
-from pylxd import (Client, exceptions)
-
-from ..platforms import Platform
-from .image import LXDImage
-from .instance import LXDInstance
-from tests.cloud_tests import util
-
-DEFAULT_SSTREAMS_SERVER = "https://images.linuxcontainers.org:8443"
-
-
-class LXDPlatform(Platform):
- """LXD test platform."""
-
- platform_name = 'lxd'
-
- def __init__(self, config):
- """Set up platform."""
- super(LXDPlatform, self).__init__(config)
- # TODO: allow configuration of remote lxd host via env variables
- # set up lxd connection
- self.client = Client()
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- pylxd_image = self.client.images.create_from_simplestreams(
- img_conf.get('sstreams_server', DEFAULT_SSTREAMS_SERVER),
- img_conf['alias'])
- image = LXDImage(self, img_conf, pylxd_image)
- if img_conf.get('override_templates', False):
- image.update_templates(self.config.get('template_overrides', {}),
- self.config.get('template_files', {}))
- return image
-
- def launch_container(self, properties, config, features,
- image=None, container=None, ephemeral=False,
- container_config=None, block=True, image_desc=None,
- use_desc=None):
- """Launch a container.
-
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image: image fingerprint to launch from
- @param container: container to copy
- @param ephemeral: delete image after first shutdown
- @param container_config: config options for instance as dict
- @param block: wait until container created
- @param image_desc: description of image being launched
- @param use_desc: description of container's use
- @return_value: cloud_tests.instances instance
- """
- if not (image or container):
- raise ValueError("either image or container must be specified")
- container = self.client.containers.create({
- 'name': util.gen_instance_name(image_desc=image_desc,
- use_desc=use_desc,
- used_list=self.list_containers()),
- 'ephemeral': bool(ephemeral),
- 'config': (container_config
- if isinstance(container_config, dict) else {}),
- 'source': ({'type': 'image', 'fingerprint': image} if image else
- {'type': 'copy', 'source': container})
- }, wait=block)
- return LXDInstance(self, container.name, properties, config, features,
- container)
-
- def container_exists(self, container_name):
- """Check if container with name 'container_name' exists.
-
- @return_value: True if exists else False
- """
- res = True
- try:
- self.client.containers.get(container_name)
- except exceptions.LXDAPIException as e:
- res = False
- if e.response.status_code != 404:
- raise
- return res
-
- def list_containers(self):
- """List names of all containers.
-
- @return_value: list of names
- """
- return [container.name for container in self.client.containers.all()]
-
- def query_image_by_alias(self, alias):
- """Get image by alias in local image store.
-
- @param alias: alias of image
- @return_value: pylxd image (not cloud_tests.images instance)
- """
- return self.client.images.get_by_alias(alias)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/snapshot.py b/tests/cloud_tests/platforms/lxd/snapshot.py
deleted file mode 100644
index b524644f..00000000
--- a/tests/cloud_tests/platforms/lxd/snapshot.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD snapshot."""
-
-from ..snapshots import Snapshot
-
-
-class LXDSnapshot(Snapshot):
- """LXD image copy backed snapshot."""
-
- platform_name = "lxd"
-
- def __init__(self, platform, properties, config, features,
- pylxd_frozen_instance):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
-        @param features: supported feature flags
-        @param pylxd_frozen_instance: frozen container that launches copy
-        """
- self.pylxd_frozen_instance = pylxd_frozen_instance
- super(LXDSnapshot, self).__init__(
- platform, properties, config, features)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- inst_config = {'user.user-data': user_data}
- if meta_data:
- inst_config['user.meta-data'] = meta_data
- instance = self.platform.launch_container(
- self.properties, self.config, self.features, block=block,
- image_desc=str(self), container=self.pylxd_frozen_instance.name,
- use_desc=use_desc, container_config=inst_config)
- if start:
- instance.start()
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- self.pylxd_frozen_instance.destroy()
- super(LXDSnapshot, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/__init__.py b/tests/cloud_tests/platforms/nocloudkvm/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/__init__.py
+++ /dev/null
diff --git a/tests/cloud_tests/platforms/nocloudkvm/image.py b/tests/cloud_tests/platforms/nocloudkvm/image.py
deleted file mode 100644
index ff5b6ad7..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/image.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""NoCloud KVM Image Base Class."""
-
-from cloudinit import subp
-
-import os
-import shutil
-import tempfile
-
-from ..images import Image
-from .snapshot import NoCloudKVMSnapshot
-
-
-class NoCloudKVMImage(Image):
- """NoCloud KVM backed image."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, config, orig_img_path):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
-        @param orig_img_path: path to the original image
- """
- self.modified = False
- self._workd = tempfile.mkdtemp(prefix='NoCloudKVMImage')
- self._orig_img_path = orig_img_path
- self._img_path = os.path.join(self._workd,
- os.path.basename(self._orig_img_path))
-
- subp.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', orig_img_path, self._img_path])
-
- super(NoCloudKVMImage, self).__init__(platform, config)
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command in image, modifying image."""
- return self.mount_image_callback(command, stdin=stdin, env=env)
-
- def mount_image_callback(self, command, stdin=None, env=None):
- """Run mount-image-callback."""
-
-        env_args = []
-        if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
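-        # mount-image-callback (cloud-image-utils) mounts the disk image
-        # and runs the command chrooted inside it, modifying the image in
-        # place without booting it.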
- mic_chroot = ['sudo', 'mount-image-callback', '--system-mounts',
- '--system-resolvconf', self._img_path,
- '--', 'chroot', '_MOUNTPOINT_']
- try:
- out, err = subp.subp(mic_chroot + env_args + list(command),
- data=stdin, decode=False)
- return (out, err, 0)
- except subp.ProcessExecutionError as e:
- return (e.stdout, e.stderr, e.exit_code)
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
-        if not self._img_path:
-            raise RuntimeError("image already destroyed; cannot snapshot")
-
- return NoCloudKVMSnapshot(self.platform, self.properties, self.config,
- self.features, self._img_path)
-
- def destroy(self):
- """Unset path to signal image is no longer used.
-
- The removal of the images and all other items is handled by the
- framework. In some cases we want to keep the images, so let the
- framework decide whether to keep or destroy everything.
- """
- self._img_path = None
- shutil.rmtree(self._workd)
-
- super(NoCloudKVMImage, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
deleted file mode 100644
index 5140a11c..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/instance.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM instance."""
-
-import copy
-import os
-import socket
-import subprocess
-import time
-import uuid
-
-from ..instances import Instance
-from cloudinit.atomic_helper import write_json
-from cloudinit import subp
-from tests.cloud_tests import LOG, util
-
-# This domain contains reverse lookups for hostnames that are used.
-# The primary reason is so sudo will return quickly when it attempts
-# to look up the hostname. i9n is just short for 'integration'.
-# see also bug 1730744 for why we had to do this.
-CI_DOMAIN = "i9n.cloud-init.io"
-
-
-class NoCloudKVMInstance(Instance):
- """NoCloud KVM backed instance."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, name, image_path, properties, config,
- features, user_data, meta_data):
- """Set up instance.
-
- @param platform: platform object
-        @param name: hostname of instance
-        @param image_path: path to disk image to boot.
-        @param properties: dictionary of properties
-        @param config: dictionary of configuration values
-        @param features: dictionary of supported feature flags
-        @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance
-        """
- super(NoCloudKVMInstance, self).__init__(
- platform, name, properties, config, features
- )
-
- self.user_data = user_data
- if meta_data:
- meta_data = copy.deepcopy(meta_data)
- else:
- meta_data = {}
-
- if 'instance-id' in meta_data:
- iid = meta_data['instance-id']
- else:
- iid = str(uuid.uuid1())
- meta_data['instance-id'] = iid
-
- self.instance_id = iid
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
-
- self.ssh_pubkey = None
- if self.ssh_pubkey_file:
- with open(self.ssh_pubkey_file, "r") as fp:
- self.ssh_pubkey = fp.read().rstrip('\n')
-
- if not meta_data.get('public-keys'):
- meta_data['public-keys'] = []
- meta_data['public-keys'].append(self.ssh_pubkey)
-
- self.ssh_ip = '127.0.0.1'
- self.ssh_port = None
- self.pid = None
- self.pid_file = None
- self.console_file = None
- self.disk = image_path
- self.cache_mode = platform.config.get('cache_mode',
- 'cache=none,aio=native')
- self.meta_data = meta_data
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
-
- if self.pid:
- # This relies on _execute which uses sudo over ssh. The ssh
- # connection would get killed before sudo exited, so ignore errors.
- cmd = ['shutdown', 'now']
- try:
- self._execute(cmd)
- except util.InTargetExecuteError:
- pass
- self._ssh_close()
-
- if wait:
- LOG.debug("Executed shutdown. waiting on pid %s to end",
- self.pid)
- time_for_shutdown = 120
- give_up_at = time.time() + time_for_shutdown
- pid_file_path = '/proc/%s' % self.pid
- msg = ("pid %s did not exit in %s seconds after shutdown." %
- (self.pid, time_for_shutdown))
-            while True:
-                if not os.path.exists(pid_file_path):
-                    break
-                if time.time() > give_up_at:
-                    raise util.PlatformError("shutdown", msg)
-                time.sleep(1)
- self.pid = None
-
- def destroy(self):
- """Clean up instance."""
- if self.pid:
- try:
- subp.subp(['kill', '-9', self.pid])
- except subp.ProcessExecutionError:
- pass
-
- if self.pid_file:
- try:
- os.remove(self.pid_file)
- except Exception:
- pass
-
- self.pid = None
- self._ssh_close()
-
- super(NoCloudKVMInstance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
-        env_args = []
-        if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def generate_seed(self, tmpdir):
- """Generate nocloud seed from user-data"""
- seed_file = os.path.join(tmpdir, '%s_seed.img' % self.name)
- user_data_file = os.path.join(tmpdir, '%s_user_data' % self.name)
- meta_data_file = os.path.join(tmpdir, '%s_meta_data' % self.name)
-
- with open(user_data_file, "w") as ud_file:
- ud_file.write(self.user_data)
-
- # meta-data can be yaml, but more easily pretty printed with json
- write_json(meta_data_file, self.meta_data)
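-        # cloud-localds packs the user-data and meta-data files into a
-        # NoCloud seed disk that cloud-init reads on first boot.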
- subp.subp(['cloud-localds', seed_file, user_data_file,
- meta_data_file])
-
- return seed_file
-
- def get_free_port(self):
- """Get a free port assigned by the kernel."""
- s = socket.socket()
- s.bind(('', 0))
- num = s.getsockname()[1]
- s.close()
- return num
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- tmpdir = self.platform.config['data_dir']
- seed = self.generate_seed(tmpdir)
- self.pid_file = os.path.join(tmpdir, '%s.pid' % self.name)
- self.console_file = os.path.join(tmpdir, '%s-console.log' % self.name)
- self.ssh_port = self.get_free_port()
-
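-        # tools/xkvm wraps qemu: boot disk plus seed disk, user-mode
-        # networking with a host port forwarded to the guest's ssh port,
-        # and the serial console written to a log file.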
- cmd = ['./tools/xkvm',
- '--disk', '%s,%s' % (self.disk, self.cache_mode),
- '--disk', '%s' % seed,
- '--netdev', ','.join(['user',
- 'hostfwd=tcp::%s-:22' % self.ssh_port,
- 'dnssearch=%s' % CI_DOMAIN]),
- '--', '-pidfile', self.pid_file, '-vnc', 'none',
- '-m', '2G', '-smp', '2', '-nographic', '-name', self.name,
- '-serial', 'file:' + self.console_file]
- subprocess.Popen(cmd,
- close_fds=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
- while not os.path.exists(self.pid_file):
- time.sleep(1)
-
- with open(self.pid_file, 'r') as pid_f:
- self.pid = pid_f.readlines()[0].strip()
-
- if wait:
- self._wait_for_system(wait_for_cloud_init)
-
- def console_log(self):
- if not self.console_file:
- return b''
- with open(self.console_file, "rb") as fp:
- return fp.read()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
deleted file mode 100644
index 53c8ebf2..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/platform.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM platform."""
-import glob
-import os
-
-from simplestreams import filters
-from simplestreams import mirrors
-from simplestreams import objectstores
-from simplestreams import util as s_util
-
-from ..platforms import Platform
-from .image import NoCloudKVMImage
-from .instance import NoCloudKVMInstance
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import util
-
-
-class NoCloudKVMPlatform(Platform):
- """NoCloud KVM test platform."""
-
- platform_name = 'nocloud-kvm'
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
-
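-        # Mirror exactly one image (host arch, requested release,
-        # disk1.img) from the simplestreams source into mirror_dir,
-        # signature-checking the stream against the configured keyring.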
-        img_filters = filters.get_filters(
-            [
-                'arch=%s' % c_util.get_dpkg_architecture(),
-                'release=%s' % img_conf['release'],
-                'ftype=disk1.img',
-            ]
-        )
-        mirror_config = {'filters': img_filters,
-                         'keep_items': False,
-                         'max_items': 1,
-                         'checksumming_reader': True,
-                         'item_download': True
-                         }
-
- def policy(content, path):
- return s_util.read_signed(content, keyring=img_conf['keyring'])
-
- smirror = mirrors.UrlMirrorReader(url, policy=policy)
- tstore = objectstores.FileStore(img_conf['mirror_dir'])
- tmirror = mirrors.ObjectFilterMirror(config=mirror_config,
- objectstore=tstore)
- tmirror.sync(smirror, path)
-
- search_d = os.path.join(img_conf['mirror_dir'], '**',
- img_conf['release'], '**', '*.img')
-
- images = []
- for fname in glob.iglob(search_d, recursive=True):
- images.append(fname)
-
- if len(images) < 1:
- raise RuntimeError("No images found under '%s'" % search_d)
- if len(images) > 1:
- raise RuntimeError(
- "Multiple images found in '%s': %s" % (search_d,
- ' '.join(images)))
-
- image = NoCloudKVMImage(self, img_conf, images[0])
- return image
-
- def create_instance(self, properties, config, features,
- src_img_path, image_desc=None, use_desc=None,
- user_data=None, meta_data=None):
- """Create an instance
-
- @param src_img_path: image path to launch from
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_desc: description of image being launched
-        @param use_desc: description of instance's use
-        @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance
-        @return_value: cloud_tests.instances instance
- """
- name = util.gen_instance_name(image_desc=image_desc, use_desc=use_desc)
- img_path = os.path.join(self.config['data_dir'], name + '.qcow2')
- subp.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', src_img_path, img_path])
-
- return NoCloudKVMInstance(self, name, img_path, properties, config,
- features, user_data, meta_data)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
deleted file mode 100644
index 2dae3590..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM snapshot."""
-import os
-import shutil
-import tempfile
-
-from ..snapshots import Snapshot
-
-
-class NoCloudKVMSnapshot(Snapshot):
- """NoCloud KVM image copy backed snapshot."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, properties, config, features, image_path):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- @param image_path: image file to snapshot.
- """
- self._workd = tempfile.mkdtemp(prefix='NoCloudKVMSnapshot')
- snapshot = os.path.join(self._workd, 'snapshot')
- shutil.copyfile(image_path, snapshot)
- self._image_path = snapshot
-
- super(NoCloudKVMSnapshot, self).__init__(
- platform, properties, config, features)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self._image_path, image_desc=str(self), use_desc=use_desc,
- user_data=user_data, meta_data=meta_data)
-
- if start:
- instance.start()
-
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- shutil.rmtree(self._workd)
- super(NoCloudKVMSnapshot, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py
deleted file mode 100644
index ac3b6563..00000000
--- a/tests/cloud_tests/platforms/platforms.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base platform class."""
-import os
-import shutil
-
-from simplestreams import filters, mirrors
-from simplestreams import util as s_util
-
-from cloudinit import subp
-from cloudinit import util as c_util
-
-from tests.cloud_tests import util
-
-
-class Platform(object):
- """Base class for platforms."""
-
- platform_name = None
-
- def __init__(self, config):
- """Set up platform."""
- self.config = config
- self.tmpdir = util.mkdtemp()
- if 'data_dir' in config:
- self.data_dir = config['data_dir']
- else:
- self.data_dir = os.path.join(self.tmpdir, "data_dir")
- os.mkdir(self.data_dir)
-
- self._generate_ssh_keys(self.data_dir)
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- raise NotImplementedError
-
- def destroy(self):
- """Clean up platform data."""
- shutil.rmtree(self.tmpdir)
-
- def _generate_ssh_keys(self, data_dir):
- """Generate SSH keys to be used with image."""
- filename = os.path.join(data_dir, self.config['private_key'])
-
- if os.path.exists(filename):
- c_util.del_file(filename)
-
- subp.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096',
- '-f', filename, '-P', '',
- '-C', 'ubuntu@cloud_test'],
- capture=True)
-
- @staticmethod
- def _query_streams(img_conf, img_filter):
- """Query streams for latest image given a specific filter.
-
- @param img_conf: configuration for image
-        @param img_filter: array of filters as strings format 'key=value'
- @return: dictionary with latest image information or empty
- """
- def policy(content, path):
- return s_util.read_signed(content, keyring=img_conf['keyring'])
-
- (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
- smirror = mirrors.UrlMirrorReader(url, policy=policy)
-
- config = {'max_items': 1, 'filters': filters.get_filters(img_filter)}
- tmirror = FilterMirror(config)
- tmirror.sync(smirror, path)
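-        # FilterMirror downloads nothing; it records metadata for items
-        # passing the filter, at most one (the latest) given max_items=1.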
-
- try:
- return tmirror.json_entries[0]
- except IndexError as e:
- raise RuntimeError(
- 'no images found with filter: %s' % img_filter
- ) from e
-
-
-class FilterMirror(mirrors.BasicMirrorWriter):
- """Taken from sstream-query to return query result as json array."""
-
- def __init__(self, config=None):
- super(FilterMirror, self).__init__(config=config)
- if config is None:
- config = {}
- self.config = config
- self.filters = config.get('filters', [])
- self.json_entries = []
-
- def load_products(self, path=None, content_id=None):
- return {'content_id': content_id, 'products': {}}
-
- def filter_item(self, data, src, target, pedigree):
- return filters.filter_item(self.filters, data, src, pedigree)
-
- def insert_item(self, data, src, target, pedigree, contentsource):
- # src and target are top level products:1.0
- # data is src['products'][ped[0]]['versions'][ped[1]]['items'][ped[2]]
- # contentsource is a ContentSource if 'path' exists in data or None
- data = s_util.products_exdata(src, pedigree)
- if 'path' in data:
- data.update({'item_url': contentsource.url})
- self.json_entries.append(data)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/snapshots.py b/tests/cloud_tests/platforms/snapshots.py
deleted file mode 100644
index 0f5f8bb6..00000000
--- a/tests/cloud_tests/platforms/snapshots.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base snapshot."""
-
-
-class Snapshot(object):
- """Base class for snapshots."""
-
- platform_name = None
-
- def __init__(self, platform, properties, config, features):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- self.platform = platform
- self.properties = properties
- self.config = config
- self.features = features
-
- def __str__(self):
- """A brief description of the snapshot."""
- return '-'.join((self.properties['os'], self.properties['release']))
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- raise NotImplementedError
-
- def destroy(self):
- """Clean up snapshot data."""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
deleted file mode 100644
index c52b78f9..00000000
--- a/tests/cloud_tests/releases.yaml
+++ /dev/null
@@ -1,381 +0,0 @@
-# ============================= Release Config ================================
-default_release_config:
- # global default configuration options
- default:
- # all are disabled by default
- enabled: false
- # timeout for booting image and running cloud init
- boot_timeout: 120
- # a script to run after a boot that is used to modify an image, before
- # making a snapshot of the image. may be useful for removing data left
- # behind from cloud-init booting, such as logs, to ensure that data
- # from snapshot.launch() will not include a cloud-init.log from a boot
- # used to create the snapshot, if cloud-init has not run
- boot_clean_script: |
- #!/bin/bash
- rm -rf /var/log/cloud-init.log /var/log/cloud-init-output.log \
- /var/lib/cloud/ /run/cloud-init/ /var/log/syslog
- # test script to determine if system is booted fully
- system_ready_script: |
- # permit running or degraded state as both indicate complete boot
- [ $(systemctl is-system-running) = 'running' -o
- $(systemctl is-system-running) = 'degraded' ]
- # test script to determine if cloud-init has finished
- cloud_init_ready_script: |
- [ -f '/run/cloud-init/result.json' ]
- # currently used features and their uses are:
- # features groups and additional feature settings
- feature_groups: []
- features: {}
- mirror_url: https://cloud-images.ubuntu.com/daily
- mirror_dir: '/srv/citest/images'
- keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
- # The OS version formatted as Major.Minor is used to compare releases.
- # Each release needs to define this, for example "16.04". Quoting is
- # necessary to ensure the version is treated as a string.
- version: null
-
- ec2:
- # Choose from: [ebs, instance-store]
- root-store: ebs
- boot_timeout: 300
- nocloud-kvm:
- setup_overrides: null
- override_templates: false
- # lxd specific default configuration options
- lxd:
- # default sstreams server to use for lxd image retrieval
- sstreams_server: https://us.images.linuxcontainers.org:8443
- # keep base image, avoids downloading again next run
- cache_base_image: true
- # lxd images from linuxcontainers.org do not have the nocloud seed
- # templates in place, so the image metadata must be modified
- override_templates: true
- # arg overrides to set image up
- setup_overrides:
- # lxd images from linuxcontainers.org do not come with
- # cloud-init, so must pull cloud-init in from repo using
- # setup_image.upgrade
- upgrade: true
- azurecloud:
- boot_timeout: 300
-
-features:
- # all currently supported feature flags
- all:
- - apt # image supports apt package manager
- - byobu # byobu is available in repositories
- - landscape # landscape-client available in repos
- - lxd # lxd is available in the image
- - ppa # image supports ppas
- - rpm # image supports rpms
- - snap # supports snapd
- # NOTE: the following feature flags are to work around bugs in the
- # images, and can be removed when no longer needed
- - hostname # setting system hostname works
- # NOTE: the following feature flags are to work around issues in the
- # testcases, and can be removed when no longer needed
- - apt_src_cont # default contents and format of sources.list matches
- # ubuntu sources.list
- - apt_hist_fmt # apt command history entries use full paths to apt
- # executable rather than relative paths
- - daylight_time # timezones are daylight not standard time
- - apt_up_out # 'Calculating upgrade..' present in log output from
- # apt-get dist-upgrade output
- - engb_locale # locale en_GB.UTF-8 is available
- - locale_gen # the /etc/locale.gen file exists
- - no_ntpdate # 'ntpdate' is not installed by default
- - no_file_fmt_e # the 'file' utility does not have a formatting error
- - ppa_file_name # the name of the source file added to sources.list.d has
- # the expected format for newer ubuntu releases
- - sshd # requires ssh server to be installed by default
- - ssh_key_fmt # ssh auth keys printed to console have expected format
- - syslog # test case requires syslog to be written by default
- - ubuntu_ntp # expect ubuntu.pool.ntp.org to be used as ntp server
-    - ubuntu_repos   # test case requires ubuntu repositories to be used
- - ubuntu_user # test case needs user with the name 'ubuntu' to exist
- # NOTE: the following feature flags are to work around issues that may
- # be considered bugs in cloud-init
- - lsb_release # image has lsb_release installed, maybe should install
- # if missing by default
- - sudo # image has sudo installed, should not be required
- # feature flag groups
- groups:
- base:
- hostname: true
- no_file_fmt_e: true
- ubuntu_specific:
- apt_src_cont: true
- apt_hist_fmt: true
- byobu: true
- daylight_time: true
- engb_locale: true
- landscape: true
- locale_gen: true
- lsb_release: true
- lxd: true
- ppa: true
- ppa_file_name: true
- snap: true
- sshd: true
- ssh_key_fmt: true
- sudo: true
- syslog: true
- ubuntu_ntp: true
- ubuntu_repos: true
- ubuntu_user: true
- debian_base:
- apt: true
- apt_up_out: true
- no_ntpdate: true
- rhel_base:
- rpm: true
-
-releases:
- # UBUNTU =================================================================
- impish:
- # EOL: July 2022
- default:
- enabled: true
- release: impish
- version: "21.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: impish
- setup_overrides: null
- override_templates: false
-
- hirsute:
- # EOL: Jan 2022
- default:
- enabled: true
- release: hirsute
- version: "21.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: hirsute
- setup_overrides: null
- override_templates: false
- groovy:
- # EOL: Jul 2021
- default:
- enabled: true
- release: groovy
- version: "20.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: groovy
- setup_overrides: null
- override_templates: false
- focal:
- # EOL: Apr 2025
- default:
- enabled: true
- release: focal
- version: "20.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: focal
- setup_overrides: null
- override_templates: false
- eoan:
- # EOL: Jul 2020
- default:
- enabled: true
- release: eoan
- version: "19.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: eoan
- setup_overrides: null
- override_templates: false
- disco:
- # EOL: Jan 2020
- default:
- enabled: true
- release: disco
- version: "19.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: disco
- setup_overrides: null
- override_templates: false
- cosmic:
- # EOL: Jul 2019
- default:
- enabled: true
- release: cosmic
- version: "18.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: cosmic
- setup_overrides: null
- override_templates: false
- bionic:
- # EOL: Apr 2023
- default:
- enabled: true
- release: bionic
- version: "18.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: bionic
- setup_overrides: null
- override_templates: false
- artful:
- # EOL: Jul 2018
- default:
- enabled: true
- release: artful
- version: "17.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: artful
- setup_overrides: null
- override_templates: false
- xenial:
- # EOL: Apr 2021
- default:
- enabled: true
- release: xenial
- version: "16.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: xenial
- setup_overrides: null
- override_templates: false
- trusty:
- # EOL: Apr 2019
- default:
- enabled: true
- release: trusty
- version: "14.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- features:
- apt_up_out: false
- locale_gen: false
- lxd: false
- ppa_file_name: false
- snap: false
- ssh_key_fmt: false
- no_ntpdate: false
- no_file_fmt_e: false
- system_ready_script: |
- #!/bin/bash
- # upstart based, so use old style runlevels
- [ $(runlevel | awk '{print $2}') = '2' ]
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: trusty
- setup_overrides: null
- override_templates: false
- # DEBIAN =================================================================
- stretch:
- # EOL: Not yet released
- default:
- enabled: true
- feature_groups:
- - base
- - debian_base
- lxd:
- alias: debian/stretch/default
- jessie:
- # EOL: Jun 2020
-    # NOTE: the cloud-init version shipped with jessie is out of date;
-    # tests work if an up-to-date deb is used
- default:
- enabled: true
- feature_groups:
- - base
- - debian_base
- lxd:
- alias: debian/jessie/default
- # CENTOS =================================================================
- centos70:
- # EOL: Jun 2024 (2020 - end of full updates)
- default:
- enabled: true
- feature_groups:
- - base
- - rhel_base
- user_data_overrides:
- preserve_hostname: true
- lxd:
- features:
- # NOTE: (LP: #1575779)
- hostname: false
- alias: centos/7/default
- centos66:
- # EOL: Nov 2020
- default:
- enabled: true
- feature_groups:
- - base
- - rhel_base
-      # still supported, but only bugfixes after May 2017
- system_ready_script: |
- #!/bin/bash
- [ $(runlevel | awk '{print $2}') = '3' ]
- user_data_overrides:
- preserve_hostname: true
- lxd:
- features:
- # NOTE: (LP: #1575779)
- hostname: false
- alias: centos/6/default
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/run_funcs.py b/tests/cloud_tests/run_funcs.py
deleted file mode 100644
index 8ae91120..00000000
--- a/tests/cloud_tests/run_funcs.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Run functions."""
-
-import os
-
-from tests.cloud_tests import bddeb, collect, util, verify
-
-
-def tree_collect(args):
- """Collect data using deb build from current tree.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- args.deb = os.path.join(tmpdir.tmpdir, 'cloud-init_all.deb')
- try:
- failed += bddeb.bddeb(args)
- failed += collect.collect(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-
-def tree_run(args):
- """Run test suite using deb build from current tree.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- args.deb = os.path.join(tmpdir.tmpdir, 'cloud-init_all.deb')
- try:
- failed += bddeb.bddeb(args)
- failed += collect.collect(args)
- failed += verify.verify(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-
-def run(args):
- """Run test suite.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- try:
- failed += collect.collect(args)
- failed += verify.verify(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py
deleted file mode 100644
index 69e66e3f..00000000
--- a/tests/cloud_tests/setup_image.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Setup image for testing."""
-
-from functools import partial
-import os
-import yaml
-
-from tests.cloud_tests import LOG
-from tests.cloud_tests import stage, util
-
-
-def installed_package_version(image, package, ensure_installed=True):
- """Get installed version of package.
-
- @param image: cloud_tests.images instance to operate on
- @param package: name of package
- @param ensure_installed: raise error if not installed
- @return_value: cloud-init version string
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = ['dpkg-query', '-W', "--showformat=${Version}", package]
- elif os_family == 'redhat':
- cmd = ['rpm', '-q', '--queryformat', "'%{VERSION}'", package]
-    else:
-        raise NotImplementedError('package query not implemented for os '
-                                  'family: {}'.format(os_family))
-
- return image.execute(
- cmd, description='query version for package: {}'.format(package),
- rcs=(0,) if ensure_installed else range(0, 256))[0].strip()
-
-
-def install_deb(args, image):
- """Install deb into image.
-
- @param args: cmdline arguments, must contain --deb
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- # ensure system is compatible with package format
- os_family = util.get_os_family(image.properties['os'])
- if os_family != 'debian':
- raise NotImplementedError('install deb: {} not supported on os '
- 'family: {}'.format(args.deb, os_family))
-
- # install deb
- msg = 'install deb: "{}" into target'.format(args.deb)
- LOG.debug(msg)
- remote_path = os.path.join('/tmp', os.path.basename(args.deb))
- image.push_file(args.deb, remote_path)
- image.execute(
- ['apt-get', 'install', '--allow-downgrades', '--assume-yes',
- remote_path], description=msg)
- # check installed deb version matches package
- fmt = ['-W', "--showformat=${Version}"]
- out = image.execute(['dpkg-deb'] + fmt + [remote_path])[0]
- expected_version = out.strip()
- found_version = installed_package_version(image, 'cloud-init')
- if expected_version != found_version:
- raise OSError('install deb version "{}" does not match expected "{}"'
- .format(found_version, expected_version))
-
- LOG.debug('successfully installed: %s, version: %s', args.deb,
- found_version)
-
-
-def install_rpm(args, image):
- """Install rpm into image.
-
- @param args: cmdline arguments, must contain --rpm
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family != 'redhat':
- raise NotImplementedError('install rpm: {} not supported on os '
- 'family: {}'.format(args.rpm, os_family))
-
- # install rpm
- msg = 'install rpm: "{}" into target'.format(args.rpm)
- LOG.debug(msg)
- remote_path = os.path.join('/tmp', os.path.basename(args.rpm))
- image.push_file(args.rpm, remote_path)
- image.execute(['rpm', '-U', remote_path], description=msg)
-
- fmt = ['--queryformat', '"%{VERSION}"']
- (out, _err, _exit) = image.execute(['rpm', '-q'] + fmt + [remote_path])
- expected_version = out.strip()
- found_version = installed_package_version(image, 'cloud-init')
- if expected_version != found_version:
- raise OSError('install rpm version "{}" does not match expected "{}"'
- .format(found_version, expected_version))
-
- LOG.debug('successfully installed: %s, version %s', args.rpm,
- found_version)
-
-
-def upgrade(args, image):
- """Upgrade or install cloud-init from repo.
-
- @param args: cmdline arguments
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = 'apt-get update && apt-get install cloud-init --yes'
- elif os_family == 'redhat':
- cmd = 'sleep 10 && yum install cloud-init --assumeyes'
-    else:
-        raise NotImplementedError('upgrade command not configured for '
-                                  'distro from family: {}'.format(os_family))
-
- msg = 'upgrading cloud-init'
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def upgrade_full(args, image):
- """Run the system's full upgrade command.
-
- @param args: cmdline arguments
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = 'apt-get update && apt-get upgrade --yes'
- elif os_family == 'redhat':
- cmd = 'yum upgrade --assumeyes'
- else:
- raise NotImplementedError('upgrade command not configured for distro '
- 'from family: {}'.format(os_family))
-
- msg = 'full system upgrade'
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def run_script(args, image):
- """Run a script in the target image.
-
- @param args: cmdline arguments, must contain --script
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- msg = 'run setup image script in target image'
- LOG.debug(msg)
- image.run_script(args.script, description=msg)
-
-
-def enable_ppa(args, image):
- """Enable a ppa in the target image.
-
- @param args: cmdline arguments, must contain --ppa
- @param image: cloud_tests.image instance to operate on
- @return_value: None, may raise errors
- """
- # ppa only supported on ubuntu (maybe debian?)
- if image.properties['os'].lower() != 'ubuntu':
- raise NotImplementedError('enabling a ppa is only available on ubuntu')
-
- # add ppa with add-apt-repository and update
- ppa = 'ppa:{}'.format(args.ppa)
- msg = 'enable ppa: "{}" in target'.format(ppa)
- LOG.debug(msg)
- cmd = 'add-apt-repository --yes {} && apt-get update'.format(ppa)
- image.execute(cmd, description=msg)
-
-
-def enable_repo(args, image):
- """Enable a repository in the target image.
-
- @param args: cmdline arguments, must contain --repo
- @param image: cloud_tests.image instance to operate on
- @return_value: None, may raise errors
- """
- # find enable repo command for the distro
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = ('echo "{}" >> "/etc/apt/sources.list" '.format(args.repo) +
- '&& apt-get update')
-    elif os_family == 'redhat':
- cmd = 'yum-config-manager --add-repo="{}"'.format(args.repo)
- else:
- raise NotImplementedError('enable repo command not configured for '
- 'distro from family: {}'.format(os_family))
-
- msg = 'enable repo: "{}" in target'.format(args.repo)
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def setup_image(args, image):
- """Set up image as specified in args.
-
- @param args: cmdline arguments
- @param image: cloud_tests.image instance to operate on
- @return_value: tuple of results and fail count
- """
- # update the args if necessary for this image
- overrides = image.setup_overrides
- LOG.debug('updating args for setup with: %s', overrides)
- args = util.update_args(args, overrides, preserve_old=True)
-
- # mapping of setup cmdline arg name to setup function
-    # represented as a tuple rather than a dict or odict, since lookup by name
-    # is not needed and order matters: --script and --upgrade go at the end
- handlers = (
- # arg handler description
- ('deb', install_deb, 'setup func for --deb, install deb'),
- ('rpm', install_rpm, 'setup func for --rpm, install rpm'),
- ('repo', enable_repo, 'setup func for --repo, enable repo'),
- ('ppa', enable_ppa, 'setup func for --ppa, enable ppa'),
- ('script', run_script, 'setup func for --script, run script'),
- ('upgrade', upgrade, 'setup func for --upgrade, upgrade cloud-init'),
- ('upgrade-full', upgrade_full, 'setup func for --upgrade-full'),
- )
-
- # determine which setup functions needed
- calls = [partial(stage.run_single, desc, partial(func, args, image))
- for name, func, desc in handlers if getattr(args, name, None)]
-
- try:
- data = yaml.safe_load(
- image.read_data("/etc/cloud/build.info", decode=True))
- info = ' '.join(["%s=%s" % (k, data.get(k))
- for k in ("build_name", "serial") if k in data])
- except Exception as e:
- info = "N/A (%s)" % e
-
- LOG.info('setting up image %s (info %s)', image, info)
- res = stage.run_stage(
- 'set up for {}'.format(image), calls, continue_after_error=False)
- return res
-
-# vi: ts=4 expandtab
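The handlers tuple in setup_image pairs each CLI flag with its setup function and only stages the ones actually present on args. A minimal sketch of that functools.partial dispatch pattern, using a stand-in handler and dict-based args rather than the real cloud_tests objects:

```python
from functools import partial

# hypothetical handler: the real setup functions take (args, image)
def install_deb(args, image):
    print("installing", args["deb"], "into", image)

# (flag, handler) pairs kept as a tuple -- order matters, lookup does not
HANDLERS = (("deb", install_deb),)

def build_calls(args, image):
    # keep only the handlers whose flag was actually supplied
    return [partial(func, args, image)
            for name, func in HANDLERS if args.get(name)]

for call in build_calls({"deb": "cloud-init.deb"}, "image-1"):
    call()  # each partial already carries its args
```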
diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py
deleted file mode 100644
index d64a1dcc..00000000
--- a/tests/cloud_tests/stage.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Stage a run."""
-
-import sys
-import time
-import traceback
-
-from tests.cloud_tests import LOG
-
-
-class PlatformComponent(object):
- """Context manager to safely handle platform components."""
-
- def __init__(self, get_func, preserve_instance=False):
- """Store get_<platform component> function as partial with no args.
-
- @param get_func: Callable returning an instance from the platform.
- @param preserve_instance: Boolean, when True, do not destroy instance
- after test. Used for test development.
- """
- self.get_func = get_func
- self.preserve_instance = preserve_instance
-
- def __enter__(self):
- """Create instance of platform component."""
- self.instance = self.get_func()
- return self.instance
-
- def __exit__(self, etype, value, trace):
- """Destroy instance."""
- if self.instance is not None:
- if self.preserve_instance:
- LOG.info('Preserving test instance %s', self.instance.name)
- else:
- self.instance.destroy()
-
-
-def run_single(name, call):
- """Run a single function, keeping track of results and time.
-
- @param name: name of part
- @param call: call to make
- @return_value: a tuple of result and fail count
- """
- res = {
- 'name': name,
- 'time': 0,
- 'errors': [],
- 'success': False
- }
- failed = 0
- start_time = time.time()
-
- try:
- call()
- except Exception as e:
- failed += 1
- res['errors'].append(str(e))
- LOG.error('stage part: %s encountered error: %s', name, str(e))
- trace = traceback.extract_tb(sys.exc_info()[-1])
- LOG.error('traceback:\n%s', ''.join(traceback.format_list(trace)))
-
- res['time'] = time.time() - start_time
- if failed == 0:
- res['success'] = True
-
- return res, failed
-
-
-def run_stage(parent_name, calls, continue_after_error=True):
- """Run a stage of collection, keeping track of results and failures.
-
- @param parent_name: name of stage calls are under
-    @param calls: list of functions taking no params; each must return a
-                  tuple of results and failures and may raise exceptions
-    @param continue_after_error: whether or not to proceed to the next call
-                                 after catching an exception or recording a
-                                 failure
-    @return_value: a tuple of (results, failures); results contains the
-                   individual call results under 'stages', a list of errors
-                   (if any at this level), the elapsed time spent running
-                   the stage, and the stage name
- """
- res = {
- 'name': parent_name,
- 'time': 0,
- 'errors': [],
- 'stages': [],
- 'success': False,
- }
- failed = 0
- start_time = time.time()
-
- for call in calls:
- try:
- (call_res, call_failed) = call()
- res['stages'].append(call_res)
- except Exception as e:
- call_failed = 1
- res['errors'].append(str(e))
- LOG.error('stage: %s encountered error: %s', parent_name, str(e))
- trace = traceback.extract_tb(sys.exc_info()[-1])
- LOG.error('traceback:\n%s', ''.join(traceback.format_list(trace)))
-
- failed += call_failed
- if call_failed and not continue_after_error:
- break
-
- res['time'] = time.time() - start_time
- if not failed:
- res['success'] = True
-
- return (res, failed)
-
-# vi: ts=4 expandtab
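run_single and run_stage share one result shape -- a dict of name/time/errors/success plus a fail count -- which is what lets stage results nest under 'stages'. A minimal sketch of the single-call half with the logging stripped out:

```python
import time

def run_single(name, call):
    """Run one part, returning (result dict, fail count)."""
    res = {"name": name, "errors": [], "success": False}
    start = time.time()
    failed = 0
    try:
        call()
        res["success"] = True
    except Exception as e:
        res["errors"].append(str(e))
        failed = 1
    res["time"] = time.time() - start
    return res, failed

def boom():
    raise RuntimeError("boom")

ok, ok_failed = run_single("good", lambda: None)
bad, bad_failed = run_single("bad", boom)
print(ok["success"], bad["errors"], ok_failed + bad_failed)  # True ['boom'] 1
```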
diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml
deleted file mode 100644
index fb9a5d27..00000000
--- a/tests/cloud_tests/testcases.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# ============================= Base Test Config ==============================
-base_test_data:
- script_timeout: 20
- enabled: True
- required_features: []
- cloud_config: |
- #cloud-config
- collect_scripts:
- cloud-init.log: |
- #!/bin/sh
- cat /var/log/cloud-init.log
- cloud-init-output.log: |
- #!/bin/sh
- cat /var/log/cloud-init-output.log
- instance-id: |
- #!/bin/sh
- cat /run/cloud-init/.instance-id
- instance-data.json: |
- #!/bin/sh
- cat /run/cloud-init/instance-data.json
- result.json: |
- #!/bin/sh
- cat /run/cloud-init/result.json
- status.json: |
- #!/bin/sh
- cat /run/cloud-init/status.json
- package-versions: |
- #!/bin/sh
- dpkg-query --show
- build.info: |
- #!/bin/sh
- binfo=/etc/cloud/build.info
- [ -f "$binfo" ] && cat "$binfo" || echo "N/A"
- system.journal.gz: |
- #!/bin/sh
- [ -d /run/systemd ] || { echo "not systemd."; exit 0; }
- fail() { echo "ERROR:" "$@" 1>&2; exit 1; }
- journal=""
- for d in /run/log/journal /var/log/journal; do
- for f in $d/*/system.journal; do
- [ -f "$f" ] || continue
- [ -z "$journal" ] ||
- fail "multiple journal found: $f $journal."
- journal="$f"
- done
- done
- [ -f "$journal" ] || fail "no journal file found."
- gzip --to-stdout "$journal"
-
-# vi: ts=4 expandtab
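Every per-test yaml is overlaid on base_test_data, so each test inherits these collect scripts unless it overrides them. A rough sketch of that overlay, assuming a shallow dict merge (the real harness merges config more carefully):

```python
import yaml

base = yaml.safe_load("""
script_timeout: 20
enabled: true
collect_scripts:
  cloud-init.log: |
    #!/bin/sh
    cat /var/log/cloud-init.log
""")

test_case = {"enabled": False, "cloud_config": "#cloud-config\n"}

merged = dict(base)
merged.update(test_case)  # per-test keys win over the base config
print(merged["enabled"], sorted(merged["collect_scripts"]))
```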
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
deleted file mode 100644
index bb9785d3..00000000
--- a/tests/cloud_tests/testcases/__init__.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-import importlib
-import inspect
-import unittest
-
-from cloudinit.util import read_conf
-
-from tests.cloud_tests import config
-from tests.cloud_tests.testcases.base import CloudTestCase as base_test
-
-
-def discover_test(test_name):
- """Discover tests in test file for 'testname'.
-
- @return_value: list of test classes
- """
- testmod_name = 'tests.cloud_tests.testcases.{}'.format(
- config.name_sanitize(test_name))
- try:
- testmod = importlib.import_module(testmod_name)
-    except ImportError as e:
- raise ValueError(
- 'no test verifier found at: {}'.format(testmod_name)
- ) from e
-
- found = [mod for name, mod in inspect.getmembers(testmod)
- if (inspect.isclass(mod)
- and base_test in inspect.getmro(mod)
- and getattr(mod, '__test__', True))]
- if len(found) != 1:
- raise RuntimeError(
- "Unexpected situation, multiple tests for %s: %s" % (
- test_name, found))
-
- return found
-
-
-def get_test_class(test_name, test_data, test_conf):
- test_class = discover_test(test_name)[0]
-
- class DynamicTestSubclass(test_class):
-
- _realclass = test_class
- data = test_data
- conf = test_conf
- release_conf = read_conf(config.RELEASES_CONF)['releases']
-
- def __str__(self):
- return "%s (%s)" % (self._testMethodName,
- unittest.util.strclass(self._realclass))
-
- @classmethod
- def setUpClass(cls):
- cls.maybeSkipTest()
-
- return DynamicTestSubclass
-
-
-def get_suite(test_name, data, conf):
- """Get test suite with all tests for 'testname'.
-
- @return_value: a test suite
- """
- suite = unittest.TestSuite()
- suite.addTest(
- unittest.defaultTestLoader.loadTestsFromTestCase(
- get_test_class(test_name, data, conf)))
- return suite
-
-# vi: ts=4 expandtab
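get_test_class works by minting a throwaway subclass whose class attributes carry the collected data; the same effect can be shown with type() and a stand-in base class:

```python
import unittest

class CloudTestCase(unittest.TestCase):
    data = {}  # filled in per collected run

    def test_has_hostname(self):
        self.assertIn("hostname", self.data)

def bind_data(test_class, test_data):
    # same trick as DynamicTestSubclass: identical tests, per-run attributes
    return type("Bound" + test_class.__name__, (test_class,),
                {"data": test_data})

suite = unittest.defaultTestLoader.loadTestsFromTestCase(
    bind_data(CloudTestCase, {"hostname": "cloud-test"}))
unittest.TextTestRunner().run(suite)
```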
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
deleted file mode 100644
index 4448e0b5..00000000
--- a/tests/cloud_tests/testcases/base.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base test case module."""
-
-import crypt
-import json
-import re
-import unittest
-
-
-from cloudinit import util as c_util
-
-SkipTest = unittest.SkipTest
-
-
-class CloudTestCase(unittest.TestCase):
- """Base test class for verifiers."""
-
- # data gets populated in get_suite.setUpClass
- data = {}
- conf = None
- _cloud_config = None
- release_conf = {} # The platform's os release configuration
-
- expected_warnings = () # Subclasses set to ignore expected WARN logs
-
- @property
- def os_cfg(self):
- return self.release_conf[self.os_name]['default']
-
- def is_distro(self, distro_name):
- return self.os_cfg['os'] == distro_name
-
- @classmethod
- def maybeSkipTest(cls):
- """Present to allow subclasses to override and raise a skipTest."""
-
- def assertPackageInstalled(self, name, version=None):
- """Check dpkg-query --show output for matching package name.
-
- @param name: package base name
- @param version: string representing a package version or part of a
- version.
- """
- pkg_out = self.get_data_file('package-versions')
- pkg_match = re.search(
- '^%s\t(?P<version>.*)$' % name, pkg_out, re.MULTILINE)
- if pkg_match:
- installed_version = pkg_match.group('version')
- if not version:
- return # Success
- if installed_version.startswith(version):
- return # Success
- raise AssertionError(
-            'Expected package version %s-%s not found. Found %s' %
-            (name, version, installed_version))
- raise AssertionError('Package not installed: %s' % name)
-
- def os_version_cmp(self, cmp_version):
- """Compare the version of the test to comparison_version.
-
- @param: cmp_version: Either a float or a string representing
- a release os from releases.yaml (e.g. centos66)
-
- @return: -1 when version < cmp_version, 0 when version=cmp_version and
- 1 when version > cmp_version.
- """
- version = self.release_conf[self.os_name]['default']['version']
- if isinstance(cmp_version, str):
- cmp_version = self.release_conf[cmp_version]['default']['version']
- if version < cmp_version:
- return -1
- elif version == cmp_version:
- return 0
- else:
- return 1
-
- @property
- def os_name(self):
- return self.data.get('os_name', 'UNKNOWN')
-
- @property
- def platform(self):
- return self.data.get('platform', 'UNKNOWN')
-
- @property
- def cloud_config(self):
- """Get the cloud-config used by the test."""
- if not self._cloud_config:
- self._cloud_config = c_util.load_yaml(self.conf)
- return self._cloud_config
-
- def get_config_entry(self, name):
- """Get a config entry from cloud-config ensuring that it is present."""
- if name not in self.cloud_config:
- raise AssertionError('Key "{}" not in cloud config'.format(name))
- return self.cloud_config[name]
-
- def get_data_file(self, name, decode=True):
- """Get data file failing test if it is not present."""
- if name not in self.data:
- raise AssertionError('File "{}" missing from collect data'
- .format(name))
- if not decode:
- return self.data[name]
- return self.data[name].decode('utf-8')
-
- def get_instance_id(self):
- """Get recorded instance id."""
- return self.get_data_file('instance-id').strip()
-
- def get_status_data(self, data, version=None):
- """Parse result.json and status.json like data files.
-
- @param data: data to load
- @param version: cloud-init output version, defaults to 'v1'
- @return_value: dict of data or None if missing
- """
- if not version:
- version = 'v1'
- data = json.loads(data)
- return data.get(version)
-
- def get_datasource(self):
- """Get datasource name."""
- data = self.get_status_data(self.get_data_file('result.json'))
- return data.get('datasource')
-
- def test_no_stages_errors(self):
- """Ensure that there were no errors in any stage."""
- status = self.get_status_data(self.get_data_file('status.json'))
- for stage in ('init', 'init-local', 'modules-config', 'modules-final'):
- self.assertIn(stage, status)
- self.assertEqual(len(status[stage]['errors']), 0,
- 'errors {} were encountered in stage {}'
- .format(status[stage]['errors'], stage))
- result = self.get_status_data(self.get_data_file('result.json'))
- self.assertEqual(len(result['errors']), 0)
-
- def test_no_warnings_in_log(self):
- """Unexpected warnings should not be found in the log."""
- warnings = [
- line for line in self.get_data_file('cloud-init.log').splitlines()
- if 'WARN' in line]
- joined_warnings = '\n'.join(warnings)
- for expected_warning in self.expected_warnings:
- self.assertIn(
- expected_warning, joined_warnings,
- msg="Did not find %s in cloud-init.log" % expected_warning)
- # Prune expected from discovered warnings
- warnings = [w for w in warnings if expected_warning not in w]
- self.assertEqual(
- [], warnings, msg="'WARN' found inside cloud-init.log")
-
- def test_instance_data_json_ec2(self):
- """Validate instance-data.json content by ec2 platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'ec2':
- raise SkipTest(
- 'Skipping ec2 instance-data.json on %s' % self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- self.assertCountEqual(['merged_cfg'], instance_data['sensitive_keys'])
- ds = instance_data.get('ds', {})
- v1_data = instance_data.get('v1', {})
- metadata = ds.get('meta-data', {})
- macs = metadata.get(
- 'network', {}).get('interfaces', {}).get('macs', {})
- if not macs:
- raise AssertionError('No network data from EC2 meta-data')
- # Check meta-data items we depend on
- expected_net_keys = [
- 'public-ipv4s', 'ipv4-associations', 'local-hostname',
- 'public-hostname']
- for mac_data in macs.values():
- for key in expected_net_keys:
- self.assertIn(key, mac_data)
- self.assertIsNotNone(
- metadata.get('placement', {}).get('availability-zone'),
- 'Could not determine EC2 Availability zone placement')
- self.assertIsNotNone(
- v1_data['availability_zone'], 'expected ec2 availability_zone')
- self.assertEqual('aws', v1_data['cloud_name'])
- self.assertEqual('ec2', v1_data['platform'])
- self.assertEqual(
- 'metadata (http://169.254.169.254)', v1_data['subplatform'])
- self.assertIn('i-', v1_data['instance_id'])
- self.assertIn('ip-', v1_data['local_hostname'])
- self.assertIsNotNone(v1_data['region'], 'expected ec2 region')
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+-aws', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
-            re.match(r'3\.\d+\.\d+', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
- def test_instance_data_json_lxd(self):
- """Validate instance-data.json content by lxd platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'lxd':
- raise SkipTest(
- 'Skipping lxd instance-data.json on %s' % self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- v1_data = instance_data.get('v1', {})
- self.assertCountEqual([], sorted(instance_data['base64_encoded_keys']))
- self.assertEqual('unknown', v1_data['cloud_name'])
- self.assertEqual('lxd', v1_data['platform'])
- self.assertEqual(
- 'seed-dir (/var/lib/cloud/seed/nocloud-net)',
- v1_data['subplatform'])
- self.assertIsNone(
- v1_data['availability_zone'],
- 'found unexpected lxd availability_zone %s' %
- v1_data['availability_zone'])
- self.assertIn('cloud-test', v1_data['instance_id'])
- self.assertIn('cloud-test', v1_data['local_hostname'])
- self.assertIsNone(
- v1_data['region'],
- 'found unexpected lxd region %s' % v1_data['region'])
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
-            re.match(r'3\.\d+\.\d+', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
- def test_instance_data_json_kvm(self):
- """Validate instance-data.json content by nocloud-kvm platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'nocloud-kvm':
- raise SkipTest(
- 'Skipping nocloud-kvm instance-data.json on %s' %
- self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- v1_data = instance_data.get('v1', {})
- self.assertCountEqual([], instance_data['base64_encoded_keys'])
- self.assertEqual('unknown', v1_data['cloud_name'])
- self.assertEqual('nocloud', v1_data['platform'])
- subplatform = v1_data['subplatform']
- self.assertIsNotNone(
- re.match(r'config-disk \(\/dev\/[a-z]{3}\)', subplatform),
- 'kvm subplatform "%s" != "config-disk (/dev/...)"' % subplatform)
- self.assertIsNone(
- v1_data['availability_zone'],
- 'found unexpected kvm availability_zone %s' %
- v1_data['availability_zone'])
- self.assertIsNotNone(
- re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}',
- v1_data['instance_id']),
- 'kvm instance_id is not a UUID: %s' % v1_data['instance_id'])
- self.assertIn('ubuntu', v1_data['local_hostname'])
- self.assertIsNone(
- v1_data['region'],
- 'found unexpected lxd region %s' % v1_data['region'])
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
-            re.match(r'3\.\d+\.\d+', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
-
-class PasswordListTest(CloudTestCase):
- """Base password test case class."""
-
- def test_shadow_passwords(self):
- """Test shadow passwords."""
- shadow = self.get_data_file('shadow')
- users = {}
- dupes = []
- for line in shadow.splitlines():
- user, encpw = line.split(":")[0:2]
- if user in users:
- dupes.append(user)
- users[user] = encpw
-
- jane_enc = "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- self.assertEqual([], dupes)
- self.assertEqual(jane_enc, users['jane'])
-
- mikey_enc = "$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89"
- self.assertEqual(mikey_enc, users['mikey'])
-
- # shadow entry is $N$salt$, so we encrypt with the same format
- # and salt and expect the result.
- tom = "mypassword123!"
- fmtsalt = users['tom'][0:users['tom'].rfind("$") + 1]
- tom_enc = crypt.crypt(tom, fmtsalt)
- self.assertEqual(tom_enc, users['tom'])
-
- harry_enc = ("$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsG"
- "JEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/")
- dick_enc = "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
-
- # these should have been changed to random values.
- self.assertNotEqual(harry_enc, users['harry'])
- self.assertTrue(users['harry'].startswith("$"))
- self.assertNotEqual(dick_enc, users['dick'])
- self.assertTrue(users['dick'].startswith("$"))
-
- self.assertNotEqual(users['harry'], users['dick'])
-
- def test_shadow_expected_users(self):
- """Test every tom, dick, and harry user in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('tom:', out)
- self.assertIn('dick:', out)
- self.assertIn('harry:', out)
- self.assertIn('jane:', out)
- self.assertIn('mikey:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
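The shadow checks above rely on a crypt(3) property: hashing a candidate password with the $N$salt$ prefix of an existing entry reproduces the full hash only when the password matches. A small sketch (the crypt module is Unix-only and deprecated since Python 3.11, so treat this as illustrative):

```python
import crypt

shadow_hash = crypt.crypt("mypassword123!", crypt.mksalt(crypt.METHOD_SHA512))

# re-derive using the format+salt prefix, exactly as the test does
fmtsalt = shadow_hash[:shadow_hash.rfind("$") + 1]
assert crypt.crypt("mypassword123!", fmtsalt) == shadow_hash
assert crypt.crypt("wrong-password", fmtsalt) != shadow_hash
print("salt-reuse check passed")
```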
diff --git a/tests/cloud_tests/testcases/bugs/README.md b/tests/cloud_tests/testcases/bugs/README.md
deleted file mode 100644
index 09ce0765..00000000
--- a/tests/cloud_tests/testcases/bugs/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Bug Test Configs
-
-## purpose
-Configs that reproduce bugs filed against cloud-init. Having test configs for
-cloud-init bugs ensures that the fixes do not break in the future, and makes it
-easy to see how many systems and platforms are affected by a new bug.
-
-## structure
-Most bugs filed should have one test config. The name of the test should
-contain ``lp`` followed by the bug number. It may also be useful to add a
-comment to each bug config with a summary copied from the bug report.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1511485.py b/tests/cloud_tests/testcases/bugs/lp1511485.py
deleted file mode 100644
index 670d3aff..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1511485.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLP1511485(base.CloudTestCase):
- """Test LP# 1511485."""
-
- def test_final_message(self):
- """Test final message exists."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertIn('Final message from cloud-config', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1511485.yaml b/tests/cloud_tests/testcases/bugs/lp1511485.yaml
deleted file mode 100644
index ebf9763f..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1511485.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# LP Bug 1511485: final_message is silent on ubuntu-12.04.5 / cloud-init 0.6.3
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- final_message: "Final message from cloud-config"
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1611074.yaml b/tests/cloud_tests/testcases/bugs/lp1611074.yaml
deleted file mode 100644
index 960679d5..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1611074.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# LP Bug 1611074: Reformatting of ephemeral drive fails on resize of Azure VM
-#
-# 2016-11-18: Disabled until test written
-#
-enabled: False
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1628337.py b/tests/cloud_tests/testcases/bugs/lp1628337.py
deleted file mode 100644
index a2c90481..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1628337.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLP1628337(base.CloudTestCase):
- """Test LP# 1511485."""
-
- def test_fetch_indices(self):
- """Verify no apt errors."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertNotIn('W: Failed to fetch', out)
- self.assertNotIn('W: Some index files failed to download. '
- 'They have been ignored, or old ones used instead.',
- out)
-
- def test_ntp(self):
- """Verify can find ntp and install it."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertNotIn('E: Unable to locate package ntp', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1628337.yaml b/tests/cloud_tests/testcases/bugs/lp1628337.yaml
deleted file mode 100644
index e39b3cd8..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1628337.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# LP Bug 1628337: cloud-init tries to install NTP before even configuring the archives
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- servers: ['ntp.ubuntu.com']
- apt:
- primary:
- - arches: [default]
- uri: http://us.archive.ubuntu.com/ubuntu/
-collect_scripts:
- ntp.conf: |
- #!/bin/bash
- cat /etc/ntp.conf
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/README.md b/tests/cloud_tests/testcases/examples/README.md
deleted file mode 100644
index 110a223b..00000000
--- a/tests/cloud_tests/testcases/examples/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Example Test Configs
-
-## Purpose
-This folder contains example cloud configs found on
-[cloudinit.readthedocs.io](https://cloudinit.readthedocs.io/en/latest/topics/examples.html).
-Examples covered by other tests, like modules, are excluded from tests here
-to prevent duplication and reduce test time.
-
-## Structure
-One test per example test config on cloudinit.readthedocs.io
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/TODO.md b/tests/cloud_tests/testcases/examples/TODO.md
deleted file mode 100644
index cde699a7..00000000
--- a/tests/cloud_tests/testcases/examples/TODO.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Missing Examples
-
-Below is a list of the missing examples and why each is not currently added.
-
- - Chef (takes > 60 seconds to run)
- - Puppet (takes > 60 seconds to run)
- - Manage resolv.conf (lxd backend overrides changes)
- - Adding a yum repository (need centos system)
- - Register Red Hat Subscription (need centos system + subscription)
- - Adjust mount points mounted (need multiple disks)
- - Call a url when finished (need end point)
- - Reboot/poweroff when finished (how to test)
- - Disk setup (need multiple disks)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/add_apt_repositories.py b/tests/cloud_tests/testcases/examples/add_apt_repositories.py
deleted file mode 100644
index 71eede97..00000000
--- a/tests/cloud_tests/testcases/examples/add_apt_repositories.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigurePrimary(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_ubuntu_sources(self):
- """Test no default Ubuntu entries exist."""
- out = self.get_data_file('ubuntu.sources.list')
- self.assertEqual(0, int(out))
-
- def test_gatech_sources(self):
- """Test GaTech entires exist."""
- out = self.get_data_file('gatech.sources.list')
- self.assertEqual(20, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml b/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml
deleted file mode 100644
index 4b8575f7..00000000
--- a/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches: [default]
- uri: "http://www.gtlib.gatech.edu/pub/ubuntu-releases/"
-collect_scripts:
- ubuntu.sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list | grep -v '^#' | sed '/^\s*$/d' | grep archive.ubuntu.com | wc -l
- gatech.sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list | grep -v '^#' | sed '/^\s*$/d' | grep gtlib.gatech.edu | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/alter_completion_message.py b/tests/cloud_tests/testcases/examples/alter_completion_message.py
deleted file mode 100644
index b7b5d5e0..00000000
--- a/tests/cloud_tests/testcases/examples/alter_completion_message.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestFinalMessage(base.CloudTestCase):
- """Test cloud init module `cc_final_message`."""
-
- subs_char = '$'
-
- def get_final_message_config(self):
- """Get config for final message."""
- self.assertIn('final_message', self.cloud_config)
- return self.cloud_config['final_message']
-
- def get_final_message(self):
- """Get final message from log."""
- out = self.get_data_file('cloud-init-output.log')
- lines = len(self.get_final_message_config().splitlines())
- return '\n'.join(out.splitlines()[-1 * lines:])
-
- def test_final_message_string(self):
- """Ensure final handles regular strings."""
- for actual, config in zip(
- self.get_final_message().splitlines(),
- self.get_final_message_config().splitlines()):
- if self.subs_char not in config:
- self.assertEqual(actual, config)
-
- def test_final_message_subs(self):
- """Test variable substitution in final message."""
- # TODO: add verification of other substitutions
- patterns = {'$datasource': self.get_datasource()}
- for key, expected in patterns.items():
- index = self.get_final_message_config().splitlines().index(key)
- actual = self.get_final_message().splitlines()[index]
- self.assertEqual(actual, expected)
-
-# vi: ts=4 expandtab
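The two tests above walk the final_message template line by line against the tail of cloud-init-output.log, comparing literal lines verbatim and `$variable` lines against their expected expansions. A sketch of that check with made-up log output:

```python
template = "This is my final message!\n$datasource"
log_tail = "This is my final message!\nDataSourceNoCloud"

expansions = {"$datasource": "DataSourceNoCloud"}
for actual, expected in zip(log_tail.splitlines(),
                            template.splitlines()):
    if "$" in expected:
        expected = expansions[expected]  # substituted line
    assert actual == expected
print("final message verified")
```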
diff --git a/tests/cloud_tests/testcases/examples/alter_completion_message.yaml b/tests/cloud_tests/testcases/examples/alter_completion_message.yaml
deleted file mode 100644
index 9e154f80..00000000
--- a/tests/cloud_tests/testcases/examples/alter_completion_message.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- final_message: |
- This is my final message!
- $version
- $timestamp
- $datasource
- $uptime
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py b/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
deleted file mode 100644
index 38540eb8..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestTrustedCA(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_cert_count_ca(self):
- """Test correct count of CAs in .crt."""
- out = self.get_data_file('cert_count_ca')
- self.assertIn('7 /etc/ssl/certs/ca-certificates.crt', out)
-
- def test_cert_count_cloudinit(self):
- """Test correct count of CAs in .pem."""
- out = self.get_data_file('cert_count_cloudinit')
- self.assertIn('7 /etc/ssl/certs/cloud-init-ca-certs.pem', out)
-
- def test_cloudinit_certs(self):
- """Test text of cert."""
- out = self.get_data_file('cloudinit_certs')
- self.assertIn('-----BEGIN CERTIFICATE-----', out)
- self.assertIn('YOUR-ORGS-TRUSTED-CA-CERT-HERE', out)
- self.assertIn('-----END CERTIFICATE-----', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml b/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml
deleted file mode 100644
index ad32b088..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- ca-certs:
- # If present and set to True, the 'remove-defaults' parameter will remove
- # all the default trusted CA certificates that are normally shipped with
- # Ubuntu.
- # This is mainly for paranoid admins - most users will not need this
- # functionality.
- remove-defaults: true
-
- # If present, the 'trusted' parameter should contain a certificate (or list
- # of certificates) to add to the system as trusted CA certificates.
- # Pay close attention to the YAML multiline list syntax. The example shown
- # here is for a list of multiline certificates.
- trusted:
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
-collect_scripts:
- cloudinit_certs: |
- #!/bin/bash
- cat /etc/ssl/certs/cloud-init-ca-certs.pem
- cert_count_ca: |
- #!/bin/bash
- wc -l /etc/ssl/certs/ca-certificates.crt
- cert_count_cloudinit: |
- #!/bin/bash
- wc -l /etc/ssl/certs/cloud-init-ca-certs.pem
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py b/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
deleted file mode 100644
index 691a316b..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSSHKeys(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_cert_count(self):
- """Test cert count."""
- out = self.get_data_file('cert_count')
- self.assertEqual(20, int(out))
-
- def test_dsa_public(self):
- """Test DSA key has ending."""
- out = self.get_data_file('dsa_public')
- self.assertIn('ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost', out)
-
- def test_rsa_public(self):
- """Test RSA key has specific ending."""
- out = self.get_data_file('rsa_public')
- self.assertIn('PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost', out)
-
- def test_auth_keys(self):
- """Test authorized keys has specific ending."""
- out = self.get_data_file('auth_keys')
- self.assertIn('QPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host', out)
- self.assertIn('Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml b/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml
deleted file mode 100644
index f3eaf3ce..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
-
- # Send pre-generated ssh private keys to the server
- # If these are present, they will be written to /etc/ssh and
- # new random keys will not be generated
- # in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported
- ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qcon2LZS/x
- 1cydPZ4pQpfjEha6WxZ6o8ci/Ea/w0n+0HGPwaxlEG2Z9inNtj3pgFrYcRztfECb
- 1j6HCibZbAzYtwIBIwJgO8h72WjcmvcpZ8OvHSvTwAguO2TkR6mPgHsgSaKy6GJo
- PUJnaZRWuba/HX0KGyhz19nPzLpzG5f0fYahlMJAyc13FV7K6kMBPXTRR6FxgHEg
- L0MPC7cdqAwOVNcPY6A7AjEA1bNaIjOzFN2sfZX0j7OMhQuc4zP7r80zaGc5oy6W
- p58hRAncFKEvnEq2CeL3vtuZAjEAwNBHpbNsBYTRPCHM7rZuG/iBtwp8Rxhc9I5w
- ixvzMgi+HpGLWzUIBS+P/XhekIjPAjA285rVmEP+DR255Ls65QbgYhJmTzIXQ2T9
- luLvcmFBC6l35Uc4gTgg4ALsmXLn71MCMGMpSWspEvuGInayTCL+vEjmNBT+FAdO
- W7D4zCpI43jRS9U06JVOeSc9CDk2lwiA3wIwCTB/6uc8Cq85D9YqpM10FuHjKpnP
- REPPOyrAspdeOAV+6VKRavstea7+2DZmSUgE
- -----END RSA PRIVATE KEY-----
-
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost
-
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT
- pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX
- DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR
- 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa
- LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY
- d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH
- bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3
- 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC
- /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv
- 99iziAH0KBMVbxy03Trz
- -----END DSA PRIVATE KEY-----
-
- dsa_public: ssh-dsa AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
-collect_scripts:
- cert_count: |
- #!/bin/bash
- ls | wc -l
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- auth_keys: |
- #!/bin/bash
- cat /home/ubuntu/.ssh/authorized_keys
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py
deleted file mode 100644
index 4067348d..00000000
--- a/tests/cloud_tests/testcases/examples/including_user_groups.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUserGroups(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_group_ubuntu(self):
- """Test ubuntu group exists."""
- out = self.get_data_file('group_ubuntu')
- self.assertRegex(out, r'ubuntu:x:[0-9]{4}:')
-
- def test_group_cloud_users(self):
- """Test cloud users group exists."""
- out = self.get_data_file('group_cloud_users')
- self.assertRegex(out, r'cloud-users:x:[0-9]{4}:barfoo')
-
- def test_user_ubuntu(self):
- """Test ubuntu user exists."""
- out = self.get_data_file('user_ubuntu')
- self.assertRegex(
- out, r'ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash')
-
- def test_user_foobar(self):
- """Test foobar user exists."""
- out = self.get_data_file('user_foobar')
- self.assertRegex(
- out, r'foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:')
-
- def test_user_barfoo(self):
- """Test barfoo user exists."""
- out = self.get_data_file('user_barfoo')
- self.assertRegex(
- out, r'barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:')
-
- def test_user_cloudy(self):
- """Test cloudy user exists."""
- out = self.get_data_file('user_cloudy')
- self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:')
-
- def test_user_root_in_secret(self):
- """Test root user is in 'secret' group."""
- _user, _, groups = self.get_data_file('root_groups').partition(":")
- self.assertIn("secret", groups.split(),
- msg="User root is not in group 'secret'")
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.yaml b/tests/cloud_tests/testcases/examples/including_user_groups.yaml
deleted file mode 100644
index 86e392dd..00000000
--- a/tests/cloud_tests/testcases/examples/including_user_groups.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- # Add groups to the system
- groups:
- - secret: [root]
- - cloud-users
-
- # Add users to the system. Users are added after groups are added.
- users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary_group: foobar
- groups: users
- expiredate: '2038-01-19'
- lock_passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: [cloud-users, secret]
- lock_passwd: true
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: '5'
- system: true
-collect_scripts:
- group_ubuntu: |
- #!/bin/bash
- getent group ubuntu
- group_cloud_users: |
- #!/bin/bash
- getent group cloud-users
- user_ubuntu: |
- #!/bin/bash
- getent passwd ubuntu
- user_foobar: |
- #!/bin/bash
- getent passwd foobar
- user_barfoo: |
- #!/bin/bash
- getent passwd barfoo
- user_cloudy: |
- #!/bin/bash
- getent passwd cloudy
- root_groups: |
- #!/bin/bash
- groups root
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py b/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
deleted file mode 100644
index df133844..00000000
--- a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestInstall(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_htop(self):
- """Verify htop installed."""
- out = self.get_data_file('htop')
- self.assertEqual(1, int(out))
-
- def test_tree(self):
- """Verify tree installed."""
- out = self.get_data_file('treeutils')
- self.assertEqual(1, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml b/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml
deleted file mode 100644
index d3980228..00000000
--- a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- packages:
- - htop
- - tree
-collect_scripts:
- htop: |
- #!/bin/bash
- dpkg -l | grep htop | wc -l
-  treeutils: |
- #!/bin/bash
- dpkg -l | grep tree | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
deleted file mode 100644
index 4ec26b8f..00000000
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestChefExample(base.CloudTestCase):
- """Test chef module."""
-
- def test_chef_basic(self):
- """Test chef installed."""
- out = self.get_data_file('chef_installed')
- self.assertIn('install ok', out)
-
- # FIXME: Add more tests, and/or replace with comprehensive module tests
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
deleted file mode 100644
index 68ca95b5..00000000
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2017-03-31: Disabled as depends on third party apt repository
-#
-enabled: False
-cloud_config: |
- #cloud-config
- # Key from https://packages.chef.io/chef.asc
- apt:
- sources:
- source1:
- source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.12 (Darwin)
- Comment: GPGTools - http://gpgtools.org
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
- PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
- CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
- AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
- Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
- SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
- OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
- Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
- IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
- twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
- DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
- WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
- 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
- dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
- MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
- 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
- zA==
- =IxPr
- -----END PGP PUBLIC KEY BLOCK-----
-
- chef:
-
- # Valid values are 'gems' and 'packages' and 'omnibus'
- install_type: "packages"
-
- # Boolean: run 'install_type' code even if chef-client
- # appears already installed.
- force_install: false
-
- # Chef settings
- server_url: "https://chef.yourorg.com:4000"
-
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
-
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
-
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
- # if validation_cert's value is "system" then it is expected
- # that the file already exists on the system.
- validation_cert: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
- run_list:
- - "recipe[apache2]"
- - "role[db]"
-
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
- apache:
- prefork:
- maxclients: 100
- keepalive: "off"
-
- # if install_type is 'omnibus', change the url to download
- omnibus_url: "https://www.opscode.com/chef/install.sh"
-
-
- # Capture all subprocess output into a logfile
- # Useful for troubleshooting cloud-init issues
- output: {all: '| tee -a /var/log/cloud-init-output.log'}
-
-collect_scripts:
- chef_installed: |
- #!/bin/sh
- dpkg-query -W -f '${Status}\n' chef
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_apt_upgrade.py b/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
deleted file mode 100644
index 744e49cb..00000000
--- a/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUpgrade(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_upgrade(self):
- """Test upgrade exists in apt history."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn(
- '[CLOUDINIT] util.py[DEBUG]: apt-upgrade '
- '[eatmydata apt-get --option=Dpkg::Options::=--force-confold '
- '--option=Dpkg::options::=--force-unsafe-io --assume-yes --quiet '
- 'dist-upgrade] took', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml b/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml
deleted file mode 100644
index 2b7eae4c..00000000
--- a/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- package_upgrade: true
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands.py b/tests/cloud_tests/testcases/examples/run_commands.py
deleted file mode 100644
index 01d5d4fc..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestRunCmd(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_run_cmd(self):
- """Test run command worked."""
- out = self.get_data_file('run_cmd')
- self.assertIn('cloud-init run cmd test', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands.yaml b/tests/cloud_tests/testcases/examples/run_commands.yaml
deleted file mode 100644
index f80eb8ce..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-collect_scripts:
- run_cmd: |
- #!/bin/bash
- cat /var/tmp/run_cmd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands_first_boot.py b/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
deleted file mode 100644
index 3f3d8f84..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestBootCmd(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_bootcmd_host(self):
- """Test boot command worked."""
- out = self.get_data_file('hosts')
- self.assertIn('192.168.1.130 us.archive.ubuntu.com', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml b/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml
deleted file mode 100644
index 7bd803db..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
-collect_scripts:
- hosts: |
- #!/bin/bash
- cat /etc/hosts
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
deleted file mode 100644
index cdb1c28d..00000000
--- a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as test suite fails this long running test currently
-#
-enabled: False
-cloud_config: |
- #cloud-config
- puppet:
- # Every key present in the conf object will be added to puppet.conf:
- # [name]
- # subkey=value
- #
- # For example the configuration below will have the following section
- # added to puppet.conf:
- # [puppetd]
- # server=puppetserver.example.org
- # certname=i-0123456.ip-X-Y-Z.cloud.internal
- #
-      # The puppetmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
- conf:
- agent:
- server: "puppetserver.example.org"
- # certname supports substitutions at runtime:
- # %i: instanceid
- # Example: i-0123456
- # %f: fqdn of the machine
- # Example: ip-X-Y-Z.cloud.internal
- #
- # NB: the certname will automatically be lowercased as required by puppet
- certname: "%i.%f"
- # ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetserver certificate in pem format.
- # It should be a multi-line string (using the | yaml notation for
- # multi-line strings).
- # The puppetserver certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host.
- #
- ca_cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py b/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
deleted file mode 100644
index 7bd520f6..00000000
--- a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestWriteFiles(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_b64(self):
- """Test b64 encoded file reads as ascii."""
- out = self.get_data_file('file_b64')
- self.assertIn('ASCII text', out)
-
- def test_binary(self):
- """Test binary file reads as executable."""
- out = self.get_data_file('file_binary')
- self.assertIn('ELF 64-bit LSB executable, x86-64, version 1', out)
-
- def test_gzip(self):
- """Test gzip file shows up as a shell script."""
- out = self.get_data_file('file_gzip')
- self.assertIn('POSIX shell script, ASCII text executable', out)
-
- def test_text(self):
- """Test text shows up as ASCII text."""
- out = self.get_data_file('file_text')
- self.assertIn('ASCII text', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml b/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml
deleted file mode 100644
index 6f78f994..00000000
--- a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module-based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4
- owner: root:root
- path: /root/file_b64
- permissions: '0644'
- - content: |
- # My new /root/file_text
-
- SMBDOPTIONS="-D"
- path: /root/file_text
- - content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
- path: /root/file_binary
- permissions: '0555'
- - encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /root/file_gzip
- permissions: '0755'
-collect_scripts:
- file_b64: |
- #!/bin/bash
- file /root/file_b64
- file_text: |
- #!/bin/bash
- file /root/file_text
- file_binary: |
- #!/bin/bash
- file /root/file_binary
- file_gzip: |
- #!/bin/bash
- file /root/file_gzip
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/README.md b/tests/cloud_tests/testcases/main/README.md
deleted file mode 100644
index 60346063..00000000
--- a/tests/cloud_tests/testcases/main/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Main Functionality Test Configs
-
-## Purpose
-Test the main features and config options of cloud-init, such as logging,
-output redirection, early init, and integration with the init system.
-
-## Structure
-Should have one or more test configs for all main cloud-init output and logging
-options, and basic functionality test cases.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/__init__.py b/tests/cloud_tests/testcases/main/__init__.py
deleted file mode 100644
index 0a592637..00000000
--- a/tests/cloud_tests/testcases/main/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Test verifiers for cloud-init main features.
-
-See configs/main/README.md for more information
-"""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/command_output_simple.py b/tests/cloud_tests/testcases/main/command_output_simple.py
deleted file mode 100644
index 80a2c8d7..00000000
--- a/tests/cloud_tests/testcases/main/command_output_simple.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestCommandOutputSimple(base.CloudTestCase):
- """Test functionality of simple output redirection."""
-
- expected_warnings = ('Stdout, stderr changing to',)
-
- def test_output_file(self):
- """Ensure that the output file is not empty and has all stages."""
- data = self.get_data_file('cloud-init-test-output')
- self.assertNotEqual(len(data), 0, "specified log empty")
- self.assertEqual(self.get_config_entry('final_message'),
- data.splitlines()[-1].strip())
- # TODO: need to test that all stages redirected here
-
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/command_output_simple.yaml b/tests/cloud_tests/testcases/main/command_output_simple.yaml
deleted file mode 100644
index 08ca8940..00000000
--- a/tests/cloud_tests/testcases/main/command_output_simple.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Test functionality of simple output redirection
-#
-cloud_config: |
- #cloud-config
- output: { all: "| tee -a /var/log/cloud-init-test-output" }
- final_message: "should be last line in cloud-init-test-output file"
-collect_scripts:
- cloud-init-test-output: |
- #!/bin/bash
- cat /var/log/cloud-init-test-output
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/README.md b/tests/cloud_tests/testcases/modules/README.md
deleted file mode 100644
index d66101f2..00000000
--- a/tests/cloud_tests/testcases/modules/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Module Test Configs
-
-## Purpose
-Test functionality of cloud config modules. See
-[here](https://cloudinit.readthedocs.io/en/latest/topics/modules.html) for
-a full list.
-
-## Structure
-Should have one or more test configs for each module in cloudinit/config/. The
-name of the test should indicate which module the config is verifying.
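-
-For example, cc_ntp in cloudinit/config/ is covered here by ntp.yaml and
-ntp.py (plus the ntp_chrony, ntp_pools, ntp_servers, and ntp_timesyncd
-variants).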
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/TODO.md b/tests/cloud_tests/testcases/modules/TODO.md
deleted file mode 100644
index 9513cb2d..00000000
--- a/tests/cloud_tests/testcases/modules/TODO.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# TODO
-
-The following lists completely or partially missing modules. If a module is
-listed with nothing below it, no work has been completed on that
-module. If there is a list below the module name, it is the remaining
-identified work.
-
-## apt_configure
-
- * apt_get_wrapper
- * What does this do? How to use it?
- * apt_get_command
- * To specify a different 'apt-get' command, set 'apt_get_command'.
-     This must be a list, and the subcommand (update, upgrade) is appended
-     to it; see the sketch after this list.
-   * Modify the default and verify the options are passed correctly.
- * preserve sources
- * TBD
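-
-A minimal sketch of overriding it (hypothetical values; top-level placement
-is assumed from the note above, untested):
-
-    #cloud-config
-    apt_get_command: ['apt-get', '--option=Dpkg::Options::=--force-confold']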
-
-## chef
-2016-11-17: Tests take > 60 seconds, and the test framework currently times out.
-
-## disable EC2 metadata
-
-## disk setup
-
-## emit upstart
-
-## fan
-
-## growpart
-
-## grub dpkg
-
-## landscape
-2016-11-17: Module is not working
-
-## lxd
-2016-11-17: Need a zfs-backed test written.
-
-## mcollective
-
-## migrator
-
-## mounts
-
-## phone home
-
-## power state change
-
-## puppet
-2016-11-17: Tests take > 60 seconds, and the test framework currently times out.
-
-## resizefs
-
-## resolv conf
-2016-11-17: Issues with changing resolv.conf under the lxc backend.
-
-## redhat subscription
-2016-11-17: Need RH support in test framework.
-
-## rightscale userdata
-2016-11-17: Specific to the RightScale cloud environment.
-
-## rsyslog
-
-## scripts per boot
-Not applicable: this module only controls when scripts are run.
-
-## scripts per instance
-Not applicable: this module only controls when scripts are run.
-
-## scripts per once
-Not applicable: this module only controls when scripts are run.
-
-## scripts user
-Not applicable: this module only controls when scripts are run.
-
-## scripts vendor
-Not applicable: this module only controls when scripts are run.
-
-## snap
-2019-12-19: Need to investigate
-
-## spacewalk
-
-## ssh authkey fingerprints
-The authkey_hash key does not appear to work. In fact, the default claims to be md5; however, syslog only shows sha256.
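-
-A hypothetical snippet for exercising it (assumes the top-level authkey_hash
-key described above; unverified):
-
-    #cloud-config
-    authkey_hash: md5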
-
-## update etc hosts
-2016-11-17: Issues with changing /etc/hosts under the lxc backend.
-
-## yum add repo
-2016-11-17: Need RH support in test framework.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/__init__.py b/tests/cloud_tests/testcases/modules/__init__.py
deleted file mode 100644
index 6ab8114d..00000000
--- a/tests/cloud_tests/testcases/modules/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Test verifiers for cloud-init cc modules.
-
-See configs/modules/README.md for more information
-"""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_conf.py b/tests/cloud_tests/testcases/modules/apt_configure_conf.py
deleted file mode 100644
index 3bf93447..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_conf.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureConf(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_conf_assumeyes(self):
- """Test config assumes true."""
- out = self.get_data_file('94cloud-init-config')
- self.assertIn('Assume-Yes "true";', out)
-
- def test_apt_conf_fixbroken(self):
- """Test config fixes broken."""
- out = self.get_data_file('94cloud-init-config')
- self.assertIn('Fix-Broken "true";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml b/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml
deleted file mode 100644
index de453000..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Provide a configuration for APT
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- conf: |
- APT {
- Get {
- Assume-Yes "true";
- Fix-Broken "true";
- }
- }
-collect_scripts:
- 94cloud-init-config: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/94cloud-init-config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py b/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
deleted file mode 100644
index eabe4607..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureDisableSuites(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_empty_sourcelist(self):
- """Test source list is empty."""
- out = self.get_data_file('sources.list')
- self.assertEqual('', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml b/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml
deleted file mode 100644
index 98800673..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Disables everything in sources.list
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- disable_suites:
- - $RELEASE
- - $RELEASE-updates
- - $RELEASE-backports
- - $RELEASE-security
-collect_scripts:
- sources.list: |
- #!/bin/bash
- grep -v '^#' /etc/apt/sources.list | sed '/^\s*$/d'
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_primary.py b/tests/cloud_tests/testcases/modules/apt_configure_primary.py
deleted file mode 100644
index 4950a2ef..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_primary.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigurePrimary(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_ubuntu_sources(self):
- """Test no default Ubuntu entries exist."""
- out = self.get_data_file('sources.list')
- ubuntu_source_count = len(
- [line for line in out.split('\n') if 'archive.ubuntu.com' in line])
- self.assertEqual(0, ubuntu_source_count)
-
- def test_gatech_sources(self):
- """Test GaTech entries exist."""
- out = self.get_data_file('sources.list')
- gatech_source_count = len(
- [line for line in out.split('\n') if 'gtlib.gatech.edu' in line])
- self.assertGreater(gatech_source_count, 0)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml b/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml
deleted file mode 100644
index cc067d4f..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Set up a custom primary sources.list
-#
-required_features:
- - apt
- - apt_src_cont
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches:
- - default
- uri: "http://www.gtlib.gatech.edu/pub/ubuntu-releases/"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_proxy.py b/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
deleted file mode 100644
index 0c61b6cc..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureProxy(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_proxy_config(self):
- """Test proxy options added to apt config."""
- out = self.get_data_file('90cloud-init-aptproxy')
- # Both 'proxy' and 'http_proxy' render the same Acquire::http::Proxy
- # line, so a single assertion covers both settings.
- self.assertIn(
-     'Acquire::http::Proxy "http://squid.internal:3128";', out)
- self.assertIn(
- 'Acquire::ftp::Proxy "ftp://squid.internal:3128";', out)
- self.assertIn(
- 'Acquire::https::Proxy "https://squid.internal:3128";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml b/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml
deleted file mode 100644
index be6c6f81..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Set apt proxy
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- proxy: "http://squid.internal:3128"
- http_proxy: "http://squid.internal:3128"
- ftp_proxy: "ftp://squid.internal:3128"
- https_proxy: "https://squid.internal:3128"
-collect_scripts:
- 90cloud-init-aptproxy: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/90cloud-init-aptproxy
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_security.py b/tests/cloud_tests/testcases/modules/apt_configure_security.py
deleted file mode 100644
index 7d7e2585..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_security.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSecurity(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_security_mirror(self):
- """Test security lines added and uncommented in source.list."""
- out = self.get_data_file('sources.list')
- self.assertEqual(6, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_security.yaml b/tests/cloud_tests/testcases/modules/apt_configure_security.yaml
deleted file mode 100644
index 83dd51df..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_security.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Add security to sources.list
-#
-required_features:
- - apt
- - ubuntu_repos
-cloud_config: |
- #cloud-config
- apt:
- security:
- - arches:
- - default
-collect_scripts:
- sources.list: |
- #!/bin/bash
- grep -c security.ubuntu.com /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
deleted file mode 100644
index d9061f3c..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesKey(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_key_list(self):
- """Test key list updated."""
- out = self.get_data_file('apt_key_list')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
- def test_source_list(self):
- """Test source.list updated."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml
deleted file mode 100644
index bde9398a..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Add a sources.list entry with a given key (Debian Jessie)
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.1.6
- Comment: Hostname: keyserver.ubuntu.com
-
- mQINBFbZRUIBEAC+A0PIKYBP9kLC4hQtRrffRS11uLo8/BdtmOdrlW0hpPHzCfKnjR3tvSEI
- lqPHG1QrrjAXKZDnZMRz+h/px7lUztvytGzHPSJd5ARUzAyjyRezUhoJ3VSCxrPqx62avuWf
- RfoJaIeHfDehL5/dTVkyiWxfVZ369ZX6JN2AgLsQTeybTQ75+2z0xPrrhnGmgh6g0qTYcAaq
- M5ONOGiqeSBX/Smjh6ALy5XkhUiFGLsI7Yluf6XSICY/x7gd6RAfgSIQrUTNMoS1sqhT4aot
- +xvOfQy8ySkfAK4NddXql6E/+ZqTmBY/Lr0YklFBy8jGT+UysfiIznPMIwbmgq5Li7BtDDtX
- b8Uyi4edPpjtextezfXYn4NVIpPL5dPZS/FXh4HpzyH0pYCfrH4QDGA7i52AGmhpiOFjJMo6
- N33sdjZHOH/2Vyp+QZaQnsdUAi1N4M6c33tQbpIScn1SY+El8z5JDA4PBzkw8HpLCi1gGoa6
- V4kfbWqXXbGAJFkLkP/vc4+pY9axOlmCkJg7xCPwhI75y1cONgovhz+BEXOzolh5KZuGbGbj
- xe0wva5DLBeIg7EQFf+99pOS7Syby3Xpm6ZbswEFV0cllK4jf/QMjtfInxobuMoI0GV0bE5l
- WlRtPCK5FnbHwxi0wPNzB/5fwzJ77r6HgPrR0OkT0lWmbUyoOQARAQABtC1MYXVuY2hwYWQg
- UFBBIGZvciBjbG91ZCBpbml0IGRldmVsb3BtZW50IHRlYW2JAjgEEwECACIFAlbZRUICGwMG
- CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAg9Bvvk0wTfHfcP/REK5N2s1JYc69qEa9ZN
- o6oi+A7l6AYw+ZY88O5TJe7F9otv5VXCIKSUT0Vsepjgf0mtXAgf/sb2lsJn/jp7tzgov3YH
- vSrkTkRydz8xcA87gwQKePuvTLxQpftF4flrBxgSueIn5O/tPrBOxLz7EVYBc78SKg9aj9L2
- yUp+YuNevlwfZCTYeBb9r3FHaab2HcgkwqYch66+nKYfwiLuQ9NzXXm0Wn0JcEQ6pWvJscbj
- C9BdawWovfvMK5/YLfI6Btm7F4mIpQBdhSOUp/YXKmdvHpmwxMCN2QhqYK49SM7qE9aUDbJL
- arppSEBtlCLWhRBZYLTUna+BkuQ1bHz4St++XTR49Qd7vDERALpApDjB2dxPfMiBzCMwQQyq
- uy13exU8o2ETLg+dZSLfDTzrBNsBFmXlw8WW17nTISYdKeGKL+QdlUjpzdwUMMzHhAO8SmMH
- zjeSlDSRMXBJFAFSbCl7EwmMKa3yVX0zInT91fNllZ3iatAmtVdqVH/BFQfTIMH2ET7A8WzJ
- ZzVSuMRhqoKdr5AMcHuJGPUoVkVJHQA+NNvEiXSysF3faL7jmKapmUwrhpYYX2H8pf+VMu2e
- cLflKTI28dl+ZQ4Pl/aVsxrti/pzhdYy05Sn5ddtySyIkvo8L1cU5MWpbvSlFPkTstBUDLBf
- pb0uBy+g0oxJQg15
- =uy53
- -----END PGP PUBLIC KEY BLOCK-----
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/source1.list
- apt_key_list: |
- #!/bin/bash
- apt-key finger
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
deleted file mode 100644
index ddc86174..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesKeyserver(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_key_list(self):
- """Test specific key added."""
- out = self.get_data_file('apt_key_list')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
- def test_source_list(self):
- """Test source.list updated."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml
deleted file mode 100644
index 25088135..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Add a sources.list entry with a key from a keyserver
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- keyid: 1FF0D8535EF7E719E5C81B9C083D06FBE4D304DF
- keyserver: keyserver.ubuntu.com
- source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/source1.list
- apt_key_list: |
- #!/bin/bash
- apt-key finger
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
deleted file mode 100644
index cf84e056..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesList(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_sources_list(self):
- """Test sources.list includes sources."""
- out = self.get_data_file('sources.list')
-
- # Verify we have 6 entries
- self.assertEqual(6, len(out.rstrip().split('\n')))
-
- # Verify the keys generated the list correctly
- self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* main restricted')
- self.assertRegex(out, r'deb-src http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* main restricted')
- self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* universe restricted')
- self.assertRegex(out, r'deb-src http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* universe restricted')
- self.assertRegex(out, r'deb http:\/\/security.ubuntu.com\/ubuntu '
- '[a-z].*security multiverse')
- self.assertRegex(out, r'deb-src http:\/\/security.ubuntu.com\/ubuntu '
- '[a-z].*security multiverse')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
deleted file mode 100644
index 87e470c1..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Generate a sources.list
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches: [default]
- uri: http://archive.ubuntu.com/ubuntu
- security:
- - arches: [default]
- uri: http://security.ubuntu.com/ubuntu
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb-src $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- deb-src $SECURITY $RELEASE-security multiverse
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
deleted file mode 100644
index dfbdeadf..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesPPA(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_ppa(self):
- """Test specific ppa added."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
- def test_ppa_key(self):
- """Test ppa key added."""
- out = self.get_data_file('apt-key')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
deleted file mode 100644
index b997bcfb..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Add a PPA to source.list
-#
-# NOTE: on older ubuntu releases the sources file added is named
-# 'cloud-init-dev-test-archive-trusty', without 'ubuntu' in the middle
-required_features:
- - apt
- - ppa
- - ppa_file_name
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- keyid: 0165013E
- keyserver: keyserver.ubuntu.com
- source: "ppa:cloud-init-dev/test-archive"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/cloud-init-dev-ubuntu-test-archive-*.list
- apt-key: |
- #!/bin/bash
- apt-key finger
- sources_full: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
deleted file mode 100644
index c98eedef..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptPipeliningDisable(base.CloudTestCase):
- """Test apt-pipelining module."""
-
- def test_disable_pipelining(self):
- """Test pipelining disabled."""
- out = self.get_data_file('90cloud-init-pipelining')
- self.assertIn('Acquire::http::Pipeline-Depth "0";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
deleted file mode 100644
index 22a31dc4..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Disable apt pipelining value
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt_pipelining: false
-collect_scripts:
- 90cloud-init-pipelining: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/90cloud-init-pipelining
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
deleted file mode 100644
index 2b940a66..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptPipeliningOS(base.CloudTestCase):
- """Test apt-pipelining module."""
-
- def test_os_pipelining(self):
- """test 'os' settings does not write apt config file."""
- out = self.get_data_file('90cloud-init-pipelining_not_written')
- self.assertEqual(0, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
deleted file mode 100644
index 86d5220b..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Set apt pipelining value to OS, no conf written
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt_pipelining: os
-collect_scripts:
- 90cloud-init-pipelining_not_written: |
- #!/bin/bash
- ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/bootcmd.py b/tests/cloud_tests/testcases/modules/bootcmd.py
deleted file mode 100644
index f5b86b03..00000000
--- a/tests/cloud_tests/testcases/modules/bootcmd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestBootCmd(base.CloudTestCase):
- """Test bootcmd module."""
-
- def test_bootcmd_host(self):
- """Test boot cmd worked."""
- out = self.get_data_file('hosts')
- self.assertIn('192.168.1.130 us.archive.ubuntu.com', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/bootcmd.yaml b/tests/cloud_tests/testcases/modules/bootcmd.yaml
deleted file mode 100644
index 3a73994e..00000000
--- a/tests/cloud_tests/testcases/modules/bootcmd.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Early boot command
-#
-cloud_config: |
- #cloud-config
- bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
-collect_scripts:
- hosts: |
- #!/bin/bash
- cat /etc/hosts
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py
deleted file mode 100644
index 74d0529a..00000000
--- a/tests/cloud_tests/testcases/modules/byobu.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestByobu(base.CloudTestCase):
- """Test Byobu module."""
-
- def test_byobu_installed(self):
- """Test byobu installed."""
- self.assertPackageInstalled('byobu')
-
- def test_byobu_profile_enabled(self):
- """Test byobu profile.d file exists."""
- out = self.get_data_file('byobu_profile_enabled')
- self.assertIn('/etc/profile.d/Z97-byobu.sh', out)
-
- def test_byobu_launch_exists(self):
- """Test byobu-launch exists."""
- out = self.get_data_file('byobu_launch_exists')
- self.assertIn('/usr/bin/byobu-launch', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml
deleted file mode 100644
index d002a611..00000000
--- a/tests/cloud_tests/testcases/modules/byobu.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Install and enable byobu system-wide and for the default user
-#
-required_features:
- - byobu
-cloud_config: |
- #cloud-config
- byobu_by_default: enable
-collect_scripts:
- byobu_profile_enabled: |
- #!/bin/bash
- ls /etc/profile.d/Z97-byobu.sh
- byobu_launch_exists: |
- #!/bin/bash
- which /usr/bin/byobu-launch
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py
deleted file mode 100644
index 6b56f639..00000000
--- a/tests/cloud_tests/testcases/modules/ca_certs.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestCaCerts(base.CloudTestCase):
- """Test ca certs module."""
-
- def test_certs_updated(self):
- """Test certs have been updated in /etc/ssl/certs."""
- out = self.get_data_file('cert_links')
- # Bionic update-ca-certificates creates fewer links (Debian #895075)
- unlinked_files = []
- links = {}
- for cert_line in out.splitlines():
- if '->' in cert_line:
- fname, _sep, link = cert_line.split()
- links[fname] = link
- else:
- unlinked_files.append(cert_line)
- self.assertEqual(['ca-certificates.crt'], unlinked_files)
- self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0'])
- self.assertEqual(
- '/usr/share/ca-certificates/cloud-init-ca-certs.crt',
- links['cloud-init-ca-certs.pem'])
-
- def test_cert_installed(self):
- """Test line from our cert exists."""
- out = self.get_data_file('cert')
- self.assertIn('a36c744454555024e7f82edc420fd2c8', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml
deleted file mode 100644
index 2cd91551..00000000
--- a/tests/cloud_tests/testcases/modules/ca_certs.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Remove existing ca_certs and install custom ca-cert
-#
-cloud_config: |
- #cloud-config
- ca-certs:
- remove-defaults: true
- trusted:
- - |
- -----BEGIN CERTIFICATE-----
- MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx
- DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP
- d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl
- bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW
- E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz
- MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93
- d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl
- MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG
- 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc
- k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ
- yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX
- RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6
- q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2
- uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S
- vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o
- 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4
- Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF
- z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1
- SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3
- Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT
- TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5
- ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3
- DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr
- mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf
- PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ
- 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM
- slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L
- ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT
- Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro
- RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586
- CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l
- hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i
- DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
- -----END CERTIFICATE-----
-collect_scripts:
- cert_links: |
- #!/bin/bash
- # links printed <filename> -> <link target>
- # non-links printed <filename>
- for file in `ls /etc/ssl/certs`; do
- [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file;
- done
- cert: |
- #!/bin/bash
- md5sum /etc/ssl/certs/ca-certificates.crt
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_disable.py b/tests/cloud_tests/testcases/modules/debug_disable.py
deleted file mode 100644
index e40e4b89..00000000
--- a/tests/cloud_tests/testcases/modules/debug_disable.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestDebugDisable(base.CloudTestCase):
- """Disable debug messages."""
-
- def test_debug_disable(self):
- """Test verbose output missing from logs."""
- out = self.get_data_file('cloud-init.log')
- self.assertNotRegex(
-     out, r'Skipping module named [a-z].* verbose printing disabled')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_disable.yaml b/tests/cloud_tests/testcases/modules/debug_disable.yaml
deleted file mode 100644
index 63218b18..00000000
--- a/tests/cloud_tests/testcases/modules/debug_disable.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Do not run in debug mode
-#
-cloud_config: |
- #cloud-config
- debug:
- verbose: False
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_enable.py b/tests/cloud_tests/testcases/modules/debug_enable.py
deleted file mode 100644
index 28d26062..00000000
--- a/tests/cloud_tests/testcases/modules/debug_enable.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestDebugEnable(base.CloudTestCase):
- """Test debug messages."""
-
- def test_debug_enable(self):
- """Test debug messages in cloud-init log."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn('[DEBUG]', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_enable.yaml b/tests/cloud_tests/testcases/modules/debug_enable.yaml
deleted file mode 100644
index d44147db..00000000
--- a/tests/cloud_tests/testcases/modules/debug_enable.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Run in debug mode
-#
-cloud_config: |
- #cloud-config
- debug:
- verbose: True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/final_message.py b/tests/cloud_tests/testcases/modules/final_message.py
deleted file mode 100644
index b7b5d5e0..00000000
--- a/tests/cloud_tests/testcases/modules/final_message.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestFinalMessage(base.CloudTestCase):
- """Test cloud init module `cc_final_message`."""
-
- subs_char = '$'
-
- def get_final_message_config(self):
- """Get config for final message."""
- self.assertIn('final_message', self.cloud_config)
- return self.cloud_config['final_message']
-
- def get_final_message(self):
- """Get final message from log."""
- out = self.get_data_file('cloud-init-output.log')
- lines = len(self.get_final_message_config().splitlines())
- return '\n'.join(out.splitlines()[-1 * lines:])
-
- def test_final_message_string(self):
- """Ensure final handles regular strings."""
- for actual, config in zip(
- self.get_final_message().splitlines(),
- self.get_final_message_config().splitlines()):
- if self.subs_char not in config:
- self.assertEqual(actual, config)
-
- def test_final_message_subs(self):
- """Test variable substitution in final message."""
- # TODO: add verification of other substitutions
- patterns = {'$datasource': self.get_datasource()}
- for key, expected in patterns.items():
- index = self.get_final_message_config().splitlines().index(key)
- actual = self.get_final_message().splitlines()[index]
- self.assertEqual(actual, expected)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/final_message.yaml b/tests/cloud_tests/testcases/modules/final_message.yaml
deleted file mode 100644
index c9ed6118..00000000
--- a/tests/cloud_tests/testcases/modules/final_message.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Print a final message with various predefined variables
-#
-cloud_config: |
- #cloud-config
- final_message: |
- This is my final message!
- $version
- $timestamp
- $datasource
- $uptime
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/keys_to_console.py b/tests/cloud_tests/testcases/modules/keys_to_console.py
deleted file mode 100644
index 07f38112..00000000
--- a/tests/cloud_tests/testcases/modules/keys_to_console.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestKeysToConsole(base.CloudTestCase):
- """Test proper keys are included and excluded to console."""
-
- def test_excluded_keys(self):
- """Test excluded keys missing."""
- out = self.get_data_file('syslog')
- self.assertNotIn('(DSA)', out)
- self.assertNotIn('(ECDSA)', out)
-
- def test_expected_keys(self):
- """Test expected keys exist."""
- out = self.get_data_file('syslog')
- self.assertIn('(ED25519)', out)
- self.assertIn('(RSA)', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/keys_to_console.yaml b/tests/cloud_tests/testcases/modules/keys_to_console.yaml
deleted file mode 100644
index 5d86e739..00000000
--- a/tests/cloud_tests/testcases/modules/keys_to_console.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Hide printing of ssh key and fingerprints for specific keys
-#
-required_features:
- - syslog
-cloud_config: |
- #cloud-config
- ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
- ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/landscape.yaml b/tests/cloud_tests/testcases/modules/landscape.yaml
deleted file mode 100644
index ed2c37c4..00000000
--- a/tests/cloud_tests/testcases/modules/landscape.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Setup landscape client settings
-#
-# 2016-11-17: Disabled because this module does not work
-#
-enabled: false
-required_features:
- - landscape
-cloud_config: |
- #cloud-config
- landscape:
- client:
- log_level: "info"
- url: "https://landscape.canonical.com/message-system"
- ping_url: "http://landscape.canonical.com/ping"
- data_path: "/var/lib/landscape/client"
- http_proxy: "http://my.proxy.com/foobar"
- https_proxy: "https://my.proxy.com/foobar"
- tags: "server,cloud"
- computer_title: "footitle"
- registration_key: "fookey"
- account_name: "fooaccount"
-collect_scripts:
- client.conf: |
- #!/bin/bash
- cat /etc/landscape/client.conf
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/locale.py b/tests/cloud_tests/testcases/modules/locale.py
deleted file mode 100644
index cb9e1dce..00000000
--- a/tests/cloud_tests/testcases/modules/locale.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-from cloudinit import util
-
-
-class TestLocale(base.CloudTestCase):
- """Test locale is set properly."""
-
- def test_locale(self):
- """Test locale is set properly."""
- data = util.load_shell_content(self.get_data_file('locale_default'))
- self.assertIn("LANG", data)
- self.assertEqual('en_GB.UTF-8', data['LANG'])
-
- def test_locale_a(self):
- """Test locale -a has both options."""
- out = self.get_data_file('locale_a')
- self.assertIn('en_GB.utf8', out)
- self.assertIn('en_US.utf8', out)
-
- def test_locale_gen(self):
- """Test local.gen file has all entries."""
- out = self.get_data_file('locale_gen')
- self.assertIn('en_GB.UTF-8', out)
- self.assertIn('en_US.UTF-8', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/locale.yaml b/tests/cloud_tests/testcases/modules/locale.yaml
deleted file mode 100644
index e01518a1..00000000
--- a/tests/cloud_tests/testcases/modules/locale.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Set locale to non-default option and verify
-#
-required_features:
- - engb_locale
- - locale_gen
-cloud_config: |
- #cloud-config
- locale: en_GB.UTF-8
- locale_configfile: /etc/default/locale
-collect_scripts:
- locale_default: |
- #!/bin/bash
- cat /etc/default/locale
- locale_a: |
- #!/bin/bash
- locale -a
- locale_gen: |
- #!/bin/bash
- grep -v '^#' /etc/locale.gen | uniq
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.py b/tests/cloud_tests/testcases/modules/lxd_bridge.py
deleted file mode 100644
index ea545e0a..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLxdBridge(base.CloudTestCase):
- """Test LXD module."""
-
- @classmethod
- def maybeSkipTest(cls):
- """Skip on cosmic for two reasons:
- a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
- b.) apt install lxd installs via snap which can be slow
- as that will download core snap and lxd."""
- os_name = cls.data.get('os_name', 'UNKNOWN')
- if os_name == "cosmic":
- raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
-
- def test_lxd(self):
- """Test lxd installed."""
- out = self.get_data_file('lxd')
- self.assertIn('/lxd', out)
-
- def test_lxc(self):
- """Test lxc installed."""
- out = self.get_data_file('lxc')
- self.assertIn('/lxc', out)
-
- def test_bridge(self):
- """Test bridge config."""
- out = self.get_data_file('lxc-bridge')
- self.assertIn('lxdbr0', out)
- self.assertIn('10.100.100.1/24', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.yaml b/tests/cloud_tests/testcases/modules/lxd_bridge.yaml
deleted file mode 100644
index e6b7e76a..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# LXD configured with directory backend and IPv4 bridge
-#
-required_features:
- - lxd
-cloud_config: |
- #cloud-config
- lxd:
- init:
- storage_backend: dir
- bridge:
- mode: new
- name: lxdbr0
- ipv4_address: 10.100.100.1
- ipv4_netmask: 24
- ipv4_dhcp_first: 10.100.100.100
- ipv4_dhcp_last: 10.100.100.200
- ipv4_nat: true
- domain: lxd
-collect_scripts:
- lxc: |
- #!/bin/bash
- which lxc
- lxd: |
- #!/bin/bash
- which lxd
- lxc-bridge: |
- #!/bin/bash
- ip addr show lxdbr0
- cat /etc/default/lxd-bridge 2>/dev/null | grep -v ^# | sort -u
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.py b/tests/cloud_tests/testcases/modules/lxd_dir.py
deleted file mode 100644
index 797bafed..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_dir.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLxdDir(base.CloudTestCase):
- """Test LXD module."""
-
- @classmethod
- def maybeSkipTest(cls):
- """Skip on cosmic for two reasons:
- a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
- b.) apt install lxd installs via snap which can be slow
- as that will download core snap and lxd."""
- os_name = cls.data.get('os_name', 'UNKNOWN')
- if os_name == "cosmic":
- raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
-
- def test_lxd(self):
- """Test lxd installed."""
- out = self.get_data_file('lxd')
- self.assertIn('/lxd', out)
-
- def test_lxc(self):
- """Test lxc installed."""
- out = self.get_data_file('lxc')
- self.assertIn('/lxc', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.yaml b/tests/cloud_tests/testcases/modules/lxd_dir.yaml
deleted file mode 100644
index f93a3fa7..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_dir.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# LXD configured with directory backend
-#
-required_features:
- - lxd
-cloud_config: |
- #cloud-config
- lxd:
- init:
- storage_backend: dir
-collect_scripts:
- lxc: |
- #!/bin/bash
- which lxc
- lxd: |
- #!/bin/bash
- which lxd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py
deleted file mode 100644
index c63cc15e..00000000
--- a/tests/cloud_tests/testcases/modules/ntp.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtp(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- self.assertPackageInstalled('ntp')
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_empty')
- self.assertEqual(0, int(out))
-
- def test_ntp_entries(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_pool_list')
- self.assertIn('pool.ntp.org iburst', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp.yaml b/tests/cloud_tests/testcases/modules/ntp.yaml
deleted file mode 100644
index 7ea0707d..00000000
--- a/tests/cloud_tests/testcases/modules/ntp.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Empty NTP config to set up using defaults
-#
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- pools: []
- servers: []
-collect_scripts:
- ntp_installed: |
- #!/bin/bash
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_empty: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_pool_list: |
- #!/bin/bash
- grep 'pool.ntp.org' /etc/ntp.conf | grep -v ^#
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
deleted file mode 100644
index 7d341773..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-import unittest
-
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpChrony(base.CloudTestCase):
- """Test ntp module with chrony client"""
-
- def setUp(self):
- """Skip this suite of tests on lxd and artful or older."""
- if self.platform == 'lxd':
- if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
- raise unittest.SkipTest(
- 'No support for chrony on containers <= artful.'
- ' LP: #1589780')
- return super(TestNtpChrony, self).setUp()
-
- def test_chrony_entries(self):
- """Test chrony config entries"""
- out = self.get_data_file('chrony_conf')
- self.assertIn('.pool.ntp.org', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
deleted file mode 100644
index 120735e2..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# ntp enabled, chrony selected, check conf file
-# as chrony won't start in a container
-#
-cloud_config: |
- #cloud-config
- ntp:
- enabled: true
- ntp_client: chrony
-collect_scripts:
- chrony_conf: |
- #!/bin/sh
- set -- /etc/chrony.conf /etc/chrony/chrony.conf
- for p in "$@"; do
- [ -e "$p" ] && { cat "$p"; exit; }
- done
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.py b/tests/cloud_tests/testcases/modules/ntp_pools.py
deleted file mode 100644
index 152fd3f1..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_pools.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpPools(base.CloudTestCase):
- """Test ntp module."""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_pools')
- self.assertEqual(0, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_pools')
- self.assertEqual(0, int(out))
-
- def test_ntp_entries(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_pools')
- pools = self.cloud_config.get('ntp').get('pools')
- for pool in pools:
- self.assertIn('pool %s iburst' % pool, out)
-
- def test_ntpq_servers(self):
- """Test ntpq output has configured servers"""
- out = self.get_data_file('ntpq_servers')
- pools = self.cloud_config.get('ntp').get('pools')
- for pool in pools:
- self.assertIn(pool, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.yaml b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
deleted file mode 100644
index 60fa0fd1..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_pools.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# NTP config using specific pools
-#
-# NOTE: lsb_release is listed here because, with (LP: #1628337) resolved in
-# recent cloud-init debs, cloud-init will attempt to configure archives.
-# This fails without lsb_release, as UNAVAILABLE is used for $RELEASE.
-required_features:
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- pools:
- - 0.cloud-init.mypool
- - 1.cloud-init.mypool
- - 172.16.15.14
-collect_scripts:
- ntp_installed_pools: |
- #!/bin/bash
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_pools: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_pools: |
- #!/bin/bash
- grep '^pool' /etc/ntp.conf
- ntpq_servers: |
- #!/bin/sh
- ntpq -p -w -n
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.py b/tests/cloud_tests/testcases/modules/ntp_servers.py
deleted file mode 100644
index 8d2a68b3..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_servers.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script"""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpServers(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_servers')
- self.assertEqual(0, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_servers')
- self.assertEqual(0, int(out))
-
- def test_ntp_entries(self):
- """Test config server entries"""
- out = self.get_data_file('ntp_conf_servers')
- servers = self.cloud_config.get('ntp').get('servers')
- for server in servers:
- self.assertIn('server %s iburst' % server, out)
-
- def test_ntpq_servers(self):
- """Test ntpq output has configured servers"""
- out = self.get_data_file('ntpq_servers')
- servers = self.cloud_config.get('ntp').get('servers')
- for server in servers:
- self.assertIn(server, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.yaml b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
deleted file mode 100644
index ee636679..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_servers.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# NTP config using specific servers
-#
-required_features:
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- servers:
- - 172.16.15.14
- - 172.16.17.18
-collect_scripts:
- ntp_installed_servers: |
- #!/bin/sh
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_servers: |
- #!/bin/sh
- cat /etc/ntp.conf.dist | wc -l
- ntp_conf_servers: |
- #!/bin/sh
- grep '^server' /etc/ntp.conf
- ntpq_servers: |
- #!/bin/sh
- ntpq -p -w -n
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
deleted file mode 100644
index eca750bc..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpTimesyncd(base.CloudTestCase):
- """Test ntp module with systemd-timesyncd client"""
-
- def test_timesyncd_entries(self):
- """Test timesyncd config entries"""
- out = self.get_data_file('timesyncd_conf')
- self.assertIn('.pool.ntp.org', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml
deleted file mode 100644
index ee47a741..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# ntp enabled, systemd-timesyncd selected, check conf file
-# as systemd-timesyncd won't start in a container
-#
-cloud_config: |
- #cloud-config
- ntp:
- enabled: true
- ntp_client: systemd-timesyncd
-collect_scripts:
- timesyncd_conf: |
- #!/bin/sh
- cat /etc/systemd/timesyncd.conf.d/cloud-init.conf
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
deleted file mode 100644
index fecad768..00000000
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
- """Test package install update upgrade module."""
-
- def test_installed_sl(self):
- """Test sl got installed."""
- self.assertPackageInstalled('sl')
-
- def test_installed_tree(self):
- """Test tree got installed."""
- self.assertPackageInstalled('tree')
-
- def test_apt_history(self):
- """Test apt history for update command."""
- out = self.get_data_file('apt_history_cmdline')
- self.assertIn(
- 'Commandline: /usr/bin/apt-get --option=Dpkg::Options'
- '::=--force-confold --option=Dpkg::options::=--force-unsafe-io '
- '--assume-yes --quiet install sl tree', out)
-
- def test_cloud_init_output(self):
- """Test cloud-init-output for install & upgrade stuff."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertIn('Setting up tree (', out)
- self.assertIn('Setting up sl (', out)
- self.assertIn('Reading package lists...', out)
- self.assertIn('Building dependency tree...', out)
- self.assertIn('Reading state information...', out)
- self.assertIn('Calculating upgrade...', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
deleted file mode 100644
index dd79e438..00000000
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Update/upgrade via apt and then install a pair of packages
-#
-# NOTE: this should not require the apt feature; use 'which' rather than
-#       'dpkg -l'
-# NOTE: the testcase for this looks for the command in history.log as
-#       /usr/bin/apt-get..., which is not how it always appears. It should
-#       instead look for just apt-get...
-# NOTE: this testcase should not require 'apt_up_out', and should look for a
-# call to 'apt-get upgrade' or 'apt-get dist-upgrade' in cloud-init.log
-# rather than 'Calculating upgrade...' in output
-required_features:
- - apt
- - apt_hist_fmt
- - apt_up_out
-cloud_config: |
- #cloud-config
- packages:
- - sl
- - tree
- package_update: true
- package_upgrade: true
-collect_scripts:
- apt_history_cmdline: |
- #!/bin/bash
- grep ^Commandline: /var/log/apt/history.log
- dpkg_show: |
- #!/bin/bash
- dpkg-query --show
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/runcmd.py b/tests/cloud_tests/testcases/modules/runcmd.py
deleted file mode 100644
index 9fce3062..00000000
--- a/tests/cloud_tests/testcases/modules/runcmd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestRunCmd(base.CloudTestCase):
- """Test runcmd module."""
-
- def test_run_cmd(self):
- """Test run command worked."""
- out = self.get_data_file('run_cmd')
- self.assertIn('cloud-init run cmd test', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/runcmd.yaml b/tests/cloud_tests/testcases/modules/runcmd.yaml
deleted file mode 100644
index 8309a883..00000000
--- a/tests/cloud_tests/testcases/modules/runcmd.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Run a simple command
-#
-cloud_config: |
- #cloud-config
- runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-collect_scripts:
- run_cmd: |
- #!/bin/bash
- cat /var/tmp/run_cmd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_command.yaml b/tests/cloud_tests/testcases/modules/seed_random_command.yaml
deleted file mode 100644
index 6a9157eb..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_command.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Use uuid to create a random string
-#
-# 2016-11-15 Disabled as this is not working currently
-#
-enabled: False
-cloud_config: |
- #cloud-config
- random_seed:
- command: ["cat", "/proc/sys/kernel/random/uuid"]
- command_required: true
- file: /root/seed
-collect_scripts:
- seed_data: |
- #!/bin/bash
- cat /root/seed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_data.py b/tests/cloud_tests/testcases/modules/seed_random_data.py
deleted file mode 100644
index db433d26..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_data.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSeedRandom(base.CloudTestCase):
- """Test seed random module."""
-
- def test_random_seed_data(self):
- """Test random data passed in exists."""
- out = self.get_data_file('seed_data')
- self.assertIn('MYUb34023nD:LFDK10913jk;dfnk:Df', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_data.yaml b/tests/cloud_tests/testcases/modules/seed_random_data.yaml
deleted file mode 100644
index a9b2c885..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_data.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Push in random raw string to set as seed
-#
-cloud_config: |
- #cloud-config
- random_seed:
- data: 'MYUb34023nD:LFDK10913jk;dfnk:Df'
- encoding: raw
- file: /root/seed
-collect_scripts:
- seed_data: |
- #!/bin/bash
- cat /root/seed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.py b/tests/cloud_tests/testcases/modules/set_hostname.py
deleted file mode 100644
index 1dbe64c2..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestHostname(base.CloudTestCase):
- """Test hostname module."""
-
- ex_hostname = "cloudinit2"
-
- def test_hostname(self):
- """Test hostname command shows correct output."""
- out = self.get_data_file('hostname')
- self.assertIn(self.ex_hostname, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.yaml b/tests/cloud_tests/testcases/modules/set_hostname.yaml
deleted file mode 100644
index 071fb220..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Set the hostname and update /etc/hosts
-#
-required_features:
- - hostname
-cloud_config: |
- #cloud-config
- hostname: cloudinit2
-
-collect_scripts:
- hosts: |
- #!/bin/bash
- grep ^127 /etc/hosts
- hostname: |
- #!/bin/bash
- hostname
- fqdn: |
- #!/bin/bash
- hostname --fqdn
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
deleted file mode 100644
index a405b30b..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests import CI_DOMAIN
-from tests.cloud_tests.testcases import base
-
-
-class TestHostnameFqdn(base.CloudTestCase):
- """Test Hostname module."""
-
- ex_hostname = "cloudinit1"
- ex_fqdn = "cloudinit2." + CI_DOMAIN
-
- def test_hostname(self):
- """Test hostname output."""
- out = self.get_data_file('hostname')
- self.assertIn(self.ex_hostname, out)
-
- def test_hostname_fqdn(self):
- """Test hostname fqdn output."""
- out = self.get_data_file('fqdn')
- self.assertIn(self.ex_fqdn, out)
-
- def test_hosts(self):
- """Test /etc/hosts file."""
- out = self.get_data_file('hosts')
- self.assertIn('127.0.1.1 %s %s' % (self.ex_fqdn, self.ex_hostname),
- out)
- self.assertIn('127.0.0.1 localhost', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
deleted file mode 100644
index a85ee79e..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Set the hostname and update /etc/hosts
-#
-required_features:
- - hostname
-cloud_config: |
- #cloud-config
- manage_etc_hosts: true
- hostname: cloudinit1
- # this needs changing if CI_DOMAIN were updated.
- fqdn: cloudinit2.i9n.cloud-init.io
-collect_scripts:
- hosts: |
- #!/bin/bash
- grep ^127 /etc/hosts
- hostname: |
- #!/bin/bash
- hostname
- fqdn: |
- #!/bin/bash
- hostname --fqdn
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password.py b/tests/cloud_tests/testcases/modules/set_password.py
deleted file mode 100644
index a29b2261..00000000
--- a/tests/cloud_tests/testcases/modules/set_password.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPassword(base.CloudTestCase):
- """Test password module."""
-
- # TODO add test to make sure password is actually "password"
-
- def test_shadow(self):
- """Test ubuntu user in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('ubuntu:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password.yaml b/tests/cloud_tests/testcases/modules/set_password.yaml
deleted file mode 100644
index 04d7c58a..00000000
--- a/tests/cloud_tests/testcases/modules/set_password.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Set password of default user
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- password: password
- chpasswd: { expire: False }
- ssh_pwauth: True
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.py b/tests/cloud_tests/testcases/modules/set_password_expire.py
deleted file mode 100644
index 967aca7b..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_expire.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordExpire(base.CloudTestCase):
- """Test password module."""
-
- def test_shadow(self):
- """Test user frozen in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('harry:!:', out)
- self.assertIn('dick:!:', out)
- self.assertIn('tom:!:', out)
- self.assertIn('harry:!:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.yaml b/tests/cloud_tests/testcases/modules/set_password_expire.yaml
deleted file mode 100644
index ba6344b9..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_expire.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Expire password for all users
-#
-required_features:
- - sshd
-cloud_config: |
- #cloud-config
- chpasswd: { expire: True }
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: dick
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: harry
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: jane
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.py b/tests/cloud_tests/testcases/modules/set_password_list.py
deleted file mode 100644
index 375cd27d..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordList(base.PasswordListTest, base.CloudTestCase):
- """Test password setting via list in chpasswd/list."""
-
- __test__ = True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.yaml b/tests/cloud_tests/testcases/modules/set_password_list.yaml
deleted file mode 100644
index fd3e1e44..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Set password of list of users
-#
-cloud_config: |
- #cloud-config
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- # md5 gotomgo
- passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
- lock_passwd: false
- - name: dick
- # md5 gocubsgo
- passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
- lock_passwd: false
- - name: harry
- # sha512 goharrygo
- passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/"
- lock_passwd: false
- - name: jane
- # sha256 gojanego
- passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- lock_passwd: false
- - name: "mikey"
- lock_passwd: false
- chpasswd:
- list:
- - tom:mypassword123!
- - dick:RANDOM
- - harry:RANDOM
- - mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.py b/tests/cloud_tests/testcases/modules/set_password_list_string.py
deleted file mode 100644
index 8c2634c5..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list_string.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordListString(base.PasswordListTest, base.CloudTestCase):
- """Test password setting via string in chpasswd/list."""
-
- __test__ = True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.yaml b/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
deleted file mode 100644
index e9fe54b0..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Set password of list of users as a string
-#
-cloud_config: |
- #cloud-config
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- # md5 gotomgo
- passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
- lock_passwd: false
- - name: dick
- # md5 gocubsgo
- passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
- lock_passwd: false
- - name: harry
- # sha512 goharrygo
- passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/"
- lock_passwd: false
- - name: jane
- # sha256 gojanego
- passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- lock_passwd: false
- - name: "mikey"
- lock_passwd: false
- chpasswd:
- list: |
- tom:mypassword123!
- dick:RANDOM
- harry:RANDOM
- mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/snap.py b/tests/cloud_tests/testcases/modules/snap.py
deleted file mode 100644
index ff68abbe..00000000
--- a/tests/cloud_tests/testcases/modules/snap.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script"""
-from tests.cloud_tests.testcases import base
-
-
-class TestSnap(base.CloudTestCase):
- """Test snap module"""
-
- def test_snappy_version(self):
- """Expect hello-world and core snaps are installed."""
- out = self.get_data_file('snaplist')
- self.assertIn('core', out)
- self.assertIn('hello-world', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/snap.yaml b/tests/cloud_tests/testcases/modules/snap.yaml
deleted file mode 100644
index 322199c3..00000000
--- a/tests/cloud_tests/testcases/modules/snap.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Install snappy
-#
-# Aug 23, 2018: Disabled due to requiring a proxy for testing
-# tests do not handle the proxy well at this time.
-enabled: False
-required_features:
- - snap
-cloud_config: |
- #cloud-config
- package_update: true
- snap:
- squashfuse_in_container: true
- commands:
- - snap install hello-world
-collect_scripts:
- snaplist: |
- #!/bin/bash
- snap list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
deleted file mode 100644
index 02935447..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeyFingerprintsDisable(base.CloudTestCase):
- """Test ssh key fingerprints module."""
-
- def test_cloud_init_log(self):
- """Verify disabled."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn('Skipping module named ssh-authkey-fingerprints, '
- 'logging of SSH fingerprints disabled', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
deleted file mode 100644
index d93893e2..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Disable fingerprint printing
-#
-required_features:
- - syslog
-cloud_config: |
- #cloud-config
- no_ssh_fingerprints: true
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
deleted file mode 100644
index 3510e75a..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeyFingerprintsEnable(base.CloudTestCase):
- """Test ssh key fingerprints module."""
-
- def test_syslog(self):
- """Verify output of syslog."""
- out = self.get_data_file('syslog')
- self.assertRegex(out, r'256 SHA256:.*(ECDSA)')
- self.assertRegex(out, r'256 SHA256:.*(ED25519)')
- self.assertNotRegex(out, r'1024 SHA256:.*(DSA)')
- self.assertNotRegex(out, r'2048 SHA256:.*(RSA)')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml
deleted file mode 100644
index 9f5dc34a..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Print auth keys with different hash than md5
-#
-# NOTE: the testcase checks output lines for '256 SHA256:.*(ECDSA)'; on
-# trusty this fails, as the output line reads '256:.*(ECDSA)'
-required_features:
- - syslog
- - ssh_key_fmt
-cloud_config: |
- #cloud-config
- ssh_genkeytypes:
- - ecdsa
- - ed25519
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXW9Gg5H7ehjdSc6qDzwNtgCy94XYHhEYlXZMO2+FJrH3wfHGiMfCwOHxcOMt2QiXItULthdeQWS9QjBSSjVRXf6731igFrqPFyS9qBlOQ5D29C4HBXFnQggGVpBNJ82IRJv7szbbe/vpgLBP4kttUza9Dr4e1YM1ln4PRnjfXea6T0m+m1ixNb5432pTXlqYOnNOxSIm1gHgMLxPuDrJvQERDKrSiKSjIdyC9Jd8t2e1tkNLY0stmckVRbhShmcJvlyofHWbc2Ca1mmtP7MlS1VQnfLkvU1IrFwkmaQmaggX6WR6coRJ6XFXdWcq/AI2K6GjSnl1dnnCxE8VCEXBlXgFzad+PMSG4yiL5j8Oo1ZVpkTdgBnw4okGqTYCXyZg6X00As9IBNQfZMFlQXlIo4FiWgj3CO5QHQOyOX6FuEumaU13GnERrSSdp9tCs1Qm3/DG2RSCQBWTfcgMcStIvKqvJ3IjFn0vGLvI3Ampnq9q1SHwmmzAPSdzcMA76HyMUA5VWaBvWHlUxzIM6unxZASnwvuCzpywSEB5J2OF+p6H+cStJwQ32XwmOG8pLp1srlVWpqZI58Du/lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs506oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_import_id.py b/tests/cloud_tests/testcases/modules/ssh_import_id.py
deleted file mode 100644
index ef156f47..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_import_id.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshImportId(base.CloudTestCase):
- """Test ssh import id module."""
-
- def test_authorized_keys(self):
- """Test that ssh keys were imported."""
- out = self.get_data_file('auth_keys_ubuntu')
-
- self.assertIn('# ssh-import-id gh:powersj', out)
- self.assertIn('# ssh-import-id lp:smoser', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_import_id.yaml b/tests/cloud_tests/testcases/modules/ssh_import_id.yaml
deleted file mode 100644
index b62d3f69..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_import_id.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Import a user's ssh key via gh or lp
-#
-required_features:
- - ubuntu_user
- - sudo
-cloud_config: |
- #cloud-config
- ssh_import_id:
- - gh:powersj
- - lp:smoser
-collect_scripts:
- auth_keys_ubuntu: |
- #!/bin/bash
- cat /home/ubuntu/.ssh/authorized_keys
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py b/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
deleted file mode 100644
index b68f5565..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeysGenerate(base.CloudTestCase):
- """Test ssh keys module."""
-
- # TODO: Check cloud-init-output for the correct keys being generated
-
- def test_dsa_public(self):
- """Test dsa public key not generated."""
- out = self.get_data_file('dsa_public')
- self.assertEqual('', out)
-
- def test_dsa_private(self):
- """Test dsa private key not generated."""
- out = self.get_data_file('dsa_private')
- self.assertEqual('', out)
-
- def test_rsa_public(self):
- """Test rsa public key not generated."""
- out = self.get_data_file('rsa_public')
- self.assertEqual('', out)
-
- def test_rsa_private(self):
- """Test rsa public key not generated."""
- out = self.get_data_file('rsa_private')
- self.assertEqual('', out)
-
- def test_ecdsa_public(self):
- """Test ecdsa public key generated."""
- out = self.get_data_file('ecdsa_public')
- self.assertIsNotNone(out)
-
- def test_ecdsa_private(self):
- """Test ecdsa public key generated."""
- out = self.get_data_file('ecdsa_private')
- self.assertIsNotNone(out)
-
- def test_ed25519_public(self):
- """Test ed25519 public key generated."""
- out = self.get_data_file('ed25519_public')
- self.assertIsNotNone(out)
-
- def test_ed25519_private(self):
- """Test ed25519 public key generated."""
- out = self.get_data_file('ed25519_private')
- self.assertIsNotNone(out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml b/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
deleted file mode 100644
index 0a7adf62..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# SSH keys generated using cloud-init
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- ssh_genkeytypes:
- - ecdsa
- - ed25519
- authkey_hash: sha512
-collect_scripts:
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- dsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- rsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key
- ecdsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key.pub
- ecdsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key
- ed25519_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key.pub
- ed25519_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py b/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
deleted file mode 100644
index add3f469..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeysProvided(base.CloudTestCase):
- """Test ssh keys module."""
-
- def test_dsa_public(self):
- """Test dsa public key passed in."""
- out = self.get_data_file('dsa_public')
- self.assertIn('AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8c'
- 'NM4ZpeuE5UB/Nnr6OSU/nmbO8LuM', out)
-
- def test_dsa_private(self):
- """Test dsa private key passed in."""
- out = self.get_data_file('dsa_private')
- self.assertIn('MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr'
- 'hOVAfzZ6+jklP', out)
-
- def test_rsa_public(self):
- """Test rsa public key passed in."""
- out = self.get_data_file('rsa_public')
- self.assertIn('AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT'
- 'LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4', out)
-
- def test_rsa_private(self):
- """Test rsa public key passed in."""
- out = self.get_data_file('rsa_private')
- self.assertIn('4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un'
- 'RQvLZpMRdywBm', out)
-
- def test_ecdsa_public(self):
- """Test ecdsa public key passed in."""
- out = self.get_data_file('ecdsa_public')
- self.assertIn('AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB'
- 'BBFsS5Tvky/IC/dXhE/afxxU', out)
-
- def test_ecdsa_private(self):
- """Test ecdsa public key passed in."""
- out = self.get_data_file('ecdsa_private')
- self.assertIn('AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY'
- '5mpZqxgX4vcgb', out)
-
- def test_ed25519_public(self):
- """Test ed25519 public key passed in."""
- out = self.get_data_file('ed25519_public')
- self.assertIn('AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6'
- 'G15dqjQ2XkNVOEnb5', out)
-
- def test_ed25519_private(self):
- """Test ed25519 public key passed in."""
- out = self.get_data_file('ed25519_private')
- self.assertIn('XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT'
- 'OhteXao0Nl5DVThJ2+Q', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml b/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
deleted file mode 100644
index 41f63550..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-#
-# SSH keys provided via cloud config
-#
-enabled: False
-required_features:
- - ubuntu_user
- - sudo
-cloud_config: |
- #cloud-config
- disable_root: false
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXW9Gg5H7ehjdSc6qDzwNtgCy94XYHhEYlXZMO2+FJrH3wfHGiMfCwOHxcOMt2QiXItULthdeQWS9QjBSSjVRXf6731igFrqPFyS9qBlOQ5D29C4HBXFnQggGVpBNJ82IRJv7szbbe/vpgLBP4kttUza9Dr4e1YM1ln4PRnjfXea6T0m+m1ixNb5432pTXlqYOnNOxSIm1gHgMLxPuDrJvQERDKrSiKSjIdyC9Jd8t2e1tkNLY0stmckVRbhShmcJvlyofHWbc2Ca1mmtP7MlS1VQnfLkvU1IrFwkmaQmaggX6WR6coRJ6XFXdWcq/AI2K6GjSnl1dnnCxE8VCEXBlXgFzad+PMSG4yiL5j8Oo1ZVpkTdgBnw4okGqTYCXyZg6X00As9IBNQfZMFlQXlIo4FiWgj3CO5QHQOyOX6FuEumaU13GnERrSSdp9tCs1Qm3/DG2RSCQBWTfcgMcStIvKqvJ3IjFn0vGLvI3Ampnq9q1SHwmmzAPSdzcMA76HyMUA5VWaBvWHlUxzIM6unxZASnwvuCzpywSEB5J2OF+p6H+cStJwQ32XwmOG8pLp1srlVWpqZI58Du/lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs506oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==
- ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAtPx6PqN3iSEsnTtibyIEy52Tra8T5fn0ryXyg46Di2NBwdnj
- o8trNv9jenfV/UhmePl58lXjT43wV8OCMl6KsYXyBdegM35NNtono4I4mLLKFMR9
- 9TOtDn6iYcaNenVhF3ZCj9Z2nNOlTrdc0uchHqKMrxLjCRCUrL91Uf+xioTF901Y
- RM+ZqC5lT92yAL76F4qPF+Lq1QtUfNfUIwwvOp5ccDZLPxij0YvyBzubYye9hJHu
- yjbJv78R4JHV+L2WhzSoX3W/6WrxVzeXqFGqH894ccOaC/7tnqSP6V8lIQ6fE2+c
- DurJcpM3CJRgkndGHjtU55Y71YkcdLksSMvezQIDAQABAoIBAQCrU4IJP8dNeaj5
- IpkY6NQvR/jfZqfogYi+MKb1IHin/4rlDfUvPcY9pt8ttLlObjYK+OcWn3Vx/sRw
- 4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2unRQvLZpMRdywBm
- lq95OrCghnG03aUsFJUZPpi5ydnwbA12ma+KHkG0EzaVlhA7X9N6z0K6U+zue2gl
- goMLt/MH0rsYawkHrwiwXaIFQeyV4MJP0vmrZLbFk1bycu9X/xPtTYotWyWo4eKA
- cb05uu04qwexkKHDM0KXtT0JecbTo2rOefFo8Uuab6uJY+fEHNocZ+v1vLA4aOxJ
- ovp1JuXlAoGBAOWYNgKrlTfy5n0sKsNk+1RuL2jHJZJ3HMd0EIt7/fFQN3Fi08Hu
- jtntqD30Wj+DJK8b8Lrt66FruxyEJm5VhVmwkukrLR5ige2f6ftZnoFCmdyy+0zP
- dnPZSUe2H5ZPHa+qthJgHLn+al2P04tGh+1fGHC2PbP+e0Co+/ZRIOxrAoGBAMnN
- IEen9/FRsqvnDd36I8XnJGskVRTZNjylxBmbKcuMWm+gNhOI7gsCAcqzD4BYZjjW
- pLhrt/u9p+l4MOJy6OUUdM/okg12SnJEGryysOcVBcXyrvOfklWnANG4EAH5jt1N
- ftTb1XTxzvWVuR/WJK0B5MZNYM71cumBdUDtPi+nAoGAYmoIXMSnxb+8xNL10aOr
- h9ljQQp8NHgSQfyiSufvRk0YNuYh1vMnEIsqnsPrG2Zfhx/25GmvoxXGssaCorDN
- 5FAn6QK06F1ZTD5L0Y3sv4OI6G1gAuC66ZWuL6sFhyyKkQ4f1WiVZ7SCa3CHQSAO
- i9VDaKz1bf4bXvAQcNj9v9kCgYACSOZCqW4vN0OUmqsXhkt9ZB6Pb/veno70pNPR
- jmYsvcwQU3oJQpWfXkhy6RAV3epaXmPDCsUsfns2M3wqNC7a2R5xdCqjKGGzZX4A
- AO3rz9se4J6Gd5oKijeCKFlWDGNHsibrdgm2pz42nZlY+O21X74dWKbt8O16I1MW
- hxkbJQKBgAXfuen/srVkJgPuqywUYag90VWCpHsuxdn+fZJa50SyZADr+RbiDfH2
- vek8Uo8ap8AEsv4Rfs9opUcUZevLp3g2741eOaidHVLm0l4iLIVl03otGOqvSzs+
- A3tFPEOxauXpzCt8f8eXsz0WQXAgIKW2h8zu5QHjomioU3i27mtE
- -----END RSA PRIVATE KEY-----
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgTLnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4+XnyVeNPjfBXw4IyXoqxhfIF16Azfk022iejgjiYssoUxH31M60OfqJhxo16dWEXdkKP1nac06VOt1zS5yEeooyvEuMJEJSsv3VR/7GKhMX3TVhEz5moLmVP3bIAvvoXio8X4urVC1R819QjDC86nlxwNks/GKPRi/IHO5tjJ72Eke7KNsm/vxHgkdX4vZaHNKhfdb/pavFXN5eoUaofz3hxw5oL/u2epI/pXyUhDp8Tb5wO6slykzcIlGCSd0YeO1TnljvViRx0uSxIy97N root@xenial-lxd
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXrhOVAfzZ6+jklP
- 55mzvC7jO53PWWC31hq10xBoWdev0WtcNF9Tv+4bAa1263y51Rqo4GI7xx+xic1d
- mLqqfYijBT9k48J/1tV0cs1Wjs6FP/IJTD/kYVC930JjYQMi722lBnUxsQIVAL7i
- z3fTGKTvSzvW0wQlwnYpS2QFAoGANp+KdyS9V93HgxGQEN1rlj/TSv/a3EVdCKtE
- nQf55aPHxDAVDVw5JtRh4pZbbRV4oGRPc9KOdjo5BU28vSM3Lmhkb+UaaDXwHkgI
- nK193o74DKjADWZxuLyyiKHiMOhxozoxDfjWxs8nz6uqvSW0pr521EwIY6RajbED
- nZ2a3GkCgYEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pf
- Q2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2E
- wExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkICFA5kVUcW
- nCPOXEQsayANi8+Cb7BH
- -----END DSA PRIVATE KEY-----
- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM7nc9ZYLfWGrXTEGhZ16/Ra1w0X1O/7hsBrXbrfLnVGqjgYjvHH7GJzV2Yuqp9iKMFP2Tjwn/W1XRyzVaOzoU/8glMP+RhUL3fQmNhAyLvbaUGdTGxAAAAFQC+4s930xik70s71tMEJcJ2KUtkBQAAAIA2n4p3JL1X3ceDEZAQ3WuWP9NK/9rcRV0Iq0SdB/nlo8fEMBUNXDkm1GHillttFXigZE9z0o52OjkFTby9IzcuaGRv5RpoNfAeSAicrX3ejvgMqMANZnG4vLKIoeIw6HGjOjEN+NbGzyfPq6q9JbSmvnbUTAhjpFqNsQOdnZrcaQAAAIEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pfQ2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2EwExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkI= root@xenial-lxd
- ed25519_private: |
- -----BEGIN OPENSSH PRIVATE KEY-----
- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
- QyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNTOhteXao0Nl5DVThJ2+QAAAJgwt+lcMLfp
- XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNTOhteXao0Nl5DVThJ2+Q
- AAAEDQlFZpz9q8+/YJHS9+jPAqy2ZT6cGEv8HTB6RZtTjd/dudAZSu4vjZpVWzId5pXmZg
- 1M6G15dqjQ2XkNVOEnb5AAAAD3Jvb3RAeGVuaWFsLWx4ZAECAwQFBg==
- -----END OPENSSH PRIVATE KEY-----
- ed25519_public: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6G15dqjQ2XkNVOEnb5 root@xenial-lxd
- ecdsa_private: |
- -----BEGIN EC PRIVATE KEY-----
- MHcCAQEEIDuK+QFc1wmyJY8uDqQVa1qHte30Rk/fdLxGIBkwJAyOoAoGCCqGSM49
- AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY5mpZqxgX4vcgb
- 7f/CtXuM6s2svcDJqAeXr6Wk8OJJcMxylA==
- -----END EC PRIVATE KEY-----
- ecdsa_public: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFsS5Tvky/IC/dXhE/afxxUG6kdQOvdQJCYGZN42OZqWasYF+L3IG+3/wrV7jOrNrL3AyagHl6+lpPDiSXDMcpQ= root@xenial-lxd
-collect_scripts:
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- dsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- rsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key
- ecdsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key.pub
- ecdsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key
- ed25519_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key.pub
- ed25519_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/timezone.py b/tests/cloud_tests/testcases/modules/timezone.py
deleted file mode 100644
index 654fa53d..00000000
--- a/tests/cloud_tests/testcases/modules/timezone.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestTimezone(base.CloudTestCase):
- """Test timezone module."""
-
- def test_timezone(self):
- """Test date prints correct timezone."""
- out = self.get_data_file('timezone')
- self.assertEqual('HDT', out.rstrip())
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/timezone.yaml b/tests/cloud_tests/testcases/modules/timezone.yaml
deleted file mode 100644
index 5112aa9f..00000000
--- a/tests/cloud_tests/testcases/modules/timezone.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# Set system timezone
-#
-required_features:
- - daylight_time
-cloud_config: |
- #cloud-config
- timezone: US/Aleutian
-collect_scripts:
- timezone: |
- #!/bin/bash
- # date will convert this to system's configured time zone.
- # use a static date to avoid dealing with daylight savings.
- date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py
deleted file mode 100644
index 4067348d..00000000
--- a/tests/cloud_tests/testcases/modules/user_groups.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUserGroups(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_group_ubuntu(self):
- """Test ubuntu group exists."""
- out = self.get_data_file('group_ubuntu')
- self.assertRegex(out, r'ubuntu:x:[0-9]{4}:')
-
- def test_group_cloud_users(self):
- """Test cloud users group exists."""
- out = self.get_data_file('group_cloud_users')
- self.assertRegex(out, r'cloud-users:x:[0-9]{4}:barfoo')
-
- def test_user_ubuntu(self):
- """Test ubuntu user exists."""
- out = self.get_data_file('user_ubuntu')
- self.assertRegex(
- out, r'ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash')
-
- def test_user_foobar(self):
- """Test foobar user exists."""
- out = self.get_data_file('user_foobar')
- self.assertRegex(
- out, r'foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:')
-
- def test_user_barfoo(self):
- """Test barfoo user exists."""
- out = self.get_data_file('user_barfoo')
- self.assertRegex(
- out, r'barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:')
-
- def test_user_cloudy(self):
- """Test cloudy user exists."""
- out = self.get_data_file('user_cloudy')
- self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:')
-
- def test_user_root_in_secret(self):
- """Test root user is in 'secret' group."""
- _user, _, groups = self.get_data_file('root_groups').partition(":")
- self.assertIn("secret", groups.split(),
- msg="User root is not in group 'secret'")
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/user_groups.yaml b/tests/cloud_tests/testcases/modules/user_groups.yaml
deleted file mode 100644
index 91b0e281..00000000
--- a/tests/cloud_tests/testcases/modules/user_groups.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# Create groups and users with various options
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- # Add groups to the system
- groups:
- - secret: [root]
- - cloud-users
-
- # Add users to the system. Users are added after groups are added.
- users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary_group: foobar
- groups: users
- expiredate: '2038-01-19'
- lock_passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: [cloud-users, secret]
- lock_passwd: true
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: '5'
- system: true
-collect_scripts:
- group_ubuntu: |
- #!/bin/bash
- getent group ubuntu
- group_cloud_users: |
- #!/bin/bash
- getent group cloud-users
- user_ubuntu: |
- #!/bin/bash
- getent passwd ubuntu
- user_foobar: |
- #!/bin/bash
- getent passwd foobar
- user_barfoo: |
- #!/bin/bash
- getent passwd barfoo
- user_cloudy: |
- #!/bin/bash
- getent passwd cloudy
- root_groups: |
- #!/bin/bash
- groups root
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/write_files.py b/tests/cloud_tests/testcases/modules/write_files.py
deleted file mode 100644
index 526a2ebd..00000000
--- a/tests/cloud_tests/testcases/modules/write_files.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestWriteFiles(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_b64(self):
- """Test b64 encoded file reads as ascii."""
- out = self.get_data_file('file_b64')
- self.assertIn('ASCII text', out)
-
- def test_binary(self):
- """Test binary file reads as executable."""
- out = self.get_data_file('file_binary').strip()
- md5 = "3801184b97bb8c6e63fa0e1eae2920d7"
- sha256 = ("2c791c4037ea5bd7e928d6a87380f8ba7a803cd83d"
- "5e4f269e28f5090f0f2c9a")
- self.assertIn(out, (md5 + " -", sha256 + " -"))
-
- def test_gzip(self):
- """Test gzip file shows up as a shell script."""
- out = self.get_data_file('file_gzip')
- self.assertIn('POSIX shell script, ASCII text executable', out)
-
- def test_text(self):
- """Test text shows up as ASCII text."""
- out = self.get_data_file('file_text')
- self.assertIn('ASCII text', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/write_files.yaml b/tests/cloud_tests/testcases/modules/write_files.yaml
deleted file mode 100644
index cc7ea4bd..00000000
--- a/tests/cloud_tests/testcases/modules/write_files.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Write various file types
-#
-# NOTE: on trusty 'file' has an output formatting error for binary files and
-# has 2 spaces in 'LSB executable', which causes a failure here
-#
-# NOTE: the binary data can be any binary data, not only executables,
-#       and can be generated via the base64 command like so:
-#       $ base64 < hello > hello.txt
-#       the reverse is:
-#       $ base64 -d < hello.txt > hello
-#
-required_features:
- - no_file_fmt_e
-cloud_config: |
- #cloud-config
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4
- owner: root:root
- path: /root/file_b64
- permissions: '0644'
- - content: |
- # My new /root/file_text
-
- SMBDOPTIONS="-D"
- path: /root/file_text
- - content: !!binary |
- /Z/xrHR4WINT0UNoKPQKbuovp6+Js+JK
- path: /root/file_binary
- permissions: '0555'
- - encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /root/file_gzip
- permissions: '0755'
-collect_scripts:
- file_b64: |
- #!/bin/bash
- file /root/file_b64
- file_text: |
- #!/bin/bash
- file /root/file_text
- file_binary: |
- #!/bin/bash
- for hasher in md5sum sha256sum; do
- $hasher </root/file_binary && break
- done
- file_gzip: |
- #!/bin/bash
- file /root/file_gzip
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
deleted file mode 100644
index 49baadb0..00000000
--- a/tests/cloud_tests/util.py
+++ /dev/null
@@ -1,532 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Utilities for re-use across integration tests."""
-
-import base64
-import copy
-import glob
-import multiprocessing
-import os
-import random
-import shlex
-import shutil
-import string
-import subprocess
-import tempfile
-import time
-import yaml
-from contextlib import contextmanager
-
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import LOG
-
-OS_FAMILY_MAPPING = {
- 'debian': ['debian', 'ubuntu'],
- 'redhat': ['centos', 'photon', 'rhel', 'fedora'],
- 'gentoo': ['gentoo'],
- 'freebsd': ['freebsd'],
- 'suse': ['sles'],
- 'arch': ['arch'],
-}
-
-
-def list_test_data(data_dir):
- """Find all tests with test data available in data_dir.
-
-    @param data_dir: should contain <platform>/<os_name>/<testname>/<data>
- @return_value: {<platform>: {<os_name>: [<testname>]}}
- """
- if not os.path.isdir(data_dir):
- raise ValueError("bad data dir")
-
- res = {}
- for platform in os.listdir(data_dir):
- if not os.path.isdir(os.path.join(data_dir, platform)):
- continue
-
- res[platform] = {}
- for os_name in os.listdir(os.path.join(data_dir, platform)):
- res[platform][os_name] = [
- os.path.sep.join(f.split(os.path.sep)[-2:]) for f in
- glob.glob(os.sep.join((data_dir, platform, os_name, '*/*')))]
-
- LOG.debug('found test data: %s\n', res)
- return res
-
-
-def gen_instance_name(prefix='cloud-test', image_desc=None, use_desc=None,
- max_len=63, delim='-', max_tries=16, used_list=None,
- valid=string.ascii_lowercase + string.digits):
- """Generate an unique name for a test instance.
-
-    @param prefix: name prefix, defaults to cloud-test; the default should
-                   normally be kept
- @param image_desc: short string (len <= 16) with image desc
- @param use_desc: short string (len <= 30) with usage desc
-    @param max_len: maximum name length, defaults to 63 chars
- @param delim: delimiter to use between tokens
- @param max_tries: maximum tries to find a unique name before giving up
- @param used_list: already used names, or none to not check
- @param valid: string of valid characters for name
- @return_value: valid, unused name, may raise StopIteration
- """
- unknown = 'unknown'
-
- def join(*args):
- """Join args with delim."""
- return delim.join(args)
-
- def fill(*args):
- """Join name elems and fill rest with random data."""
- name = join(*args)
- num = max_len - len(name) - len(delim)
- return join(name, ''.join(random.choice(valid) for _ in range(num)))
-
- def clean(elem, max_len):
- """Filter bad characters out of elem and trim to length."""
- elem = elem.lower()[:max_len] if elem else unknown
- return ''.join(c if c in valid else delim for c in elem)
-
- return next(name for name in
- (fill(prefix, clean(image_desc, 16), clean(use_desc, 30))
- for _ in range(max_tries))
- if not used_list or name not in used_list)
-
-
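# A minimal usage sketch for gen_instance_name (hypothetical values, not part
# of the original module):
#
#     name = gen_instance_name(image_desc='ubuntu-focal', use_desc='ntp-pools')
#     # -> e.g. 'cloud-test-ubuntu-focal-ntp-pools-<random chars>' padded out
#     # to max_len; pass used_list to guarantee the name is not a duplicate.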
-def sorted_unique(iterable, key=None, reverse=False):
- """Create unique sorted list.
-
- @param iterable: the data structure to sort
- @param key: if you have a specific key
- @param reverse: to reverse or not
- @return_value: a sorted list of unique items in iterable
- """
- return sorted(set(iterable), key=key, reverse=reverse)
-
-
-def get_os_family(os_name):
- """Get os family type for os_name.
-
- @param os_name: name of os
- @return_value: family name for os_name
- """
- return next((k for k, v in OS_FAMILY_MAPPING.items()
- if os_name.lower() in v), None)
-
-
-def current_verbosity():
- """Get verbosity currently in effect from log level.
-
- @return_value: verbosity, 0-2, 2=verbose, 0=quiet
- """
- return max(min(3 - int(LOG.level / 10), 2), 0)
-
-
-@contextmanager
-def emit_dots_on_travis():
- """
- A context manager that emits a dot every 10 seconds if running on Travis.
-
- Travis will kill jobs that don't emit output for a certain amount of time.
- This context manager spins up a background process which will emit a dot to
- stdout every 10 seconds to avoid being killed.
-
- It should be wrapped selectively around operations that are known to take a
- long time.
- """
- if os.environ.get('TRAVIS') != "true":
- # If we aren't on Travis, don't do anything.
- yield
- return
-
- def emit_dots():
- while True:
- print(".")
- time.sleep(10)
-
- dot_process = multiprocessing.Process(target=emit_dots)
- dot_process.start()
- try:
- yield
- finally:
- dot_process.terminate()
-
-
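# A usage sketch for emit_dots_on_travis (the call site here is hypothetical;
# real callers live elsewhere in the harness):
#
#     with emit_dots_on_travis():
#         snapshot = image.snapshot()  # any long-running operation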
-def is_writable_dir(path):
- """Make sure dir is writable.
-
- @param path: path to determine if writable
- @return_value: boolean with result
- """
- try:
- c_util.ensure_dir(path)
- os.remove(tempfile.mkstemp(dir=os.path.abspath(path))[1])
- except (IOError, OSError):
- return False
- return True
-
-
-def is_clean_writable_dir(path):
- """Make sure dir is empty and writable, creating it if it does not exist.
-
- @param path: path to check
- @return_value: True/False if successful
- """
- path = os.path.abspath(path)
- if not (is_writable_dir(path) and len(os.listdir(path)) == 0):
- return False
- return True
-
-
-def configure_yaml():
- """Clean yaml."""
- yaml.add_representer(str, (lambda dumper, data: dumper.represent_scalar(
- 'tag:yaml.org,2002:str', data, style='|' if '\n' in data else '')))
-
-
-def yaml_format(data, content_type=None):
- """Format data as yaml.
-
- @param data: data to dump
-    @param content_type: if specified, prepend it to the dumped data as a
-        '#<content_type>' header line
- @return_value: yaml string
- """
- configure_yaml()
- content_type = (
- '#{}\n'.format(content_type.strip('#\n')) if content_type else '')
- return content_type + yaml.dump(data, indent=2, default_flow_style=False)
-
-
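# A small illustration of yaml_format; the exact output is a sketch based on
# the str representer installed by configure_yaml:
#
#     yaml_format({'msg': 'a\nb'}, content_type='cloud-config')
#     # -> roughly '#cloud-config\nmsg: |-\n  a\n  b\n'
#     # (multiline strings are emitted in block style, and the stripped
#     # content_type is prepended as a '#...' header line)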
-def yaml_dump(data, path):
- """Dump data to path in yaml format."""
- c_util.write_file(os.path.abspath(path), yaml_format(data), omode='w')
-
-
-def merge_results(data, path):
- """Handle merging results from collect phase and verify phase."""
- current = {}
- if os.path.exists(path):
- with open(path, 'r') as fp:
- current = c_util.load_yaml(fp.read())
- current.update(data)
- yaml_dump(current, path)
-
-
-def rel_files(basedir):
- """List of files under directory by relative path, not including dirs.
-
- @param basedir: directory to search
-    @return_value: list of relative paths
- """
- basedir = os.path.normpath(basedir)
- return [path[len(basedir) + 1:] for path in
- glob.glob(os.path.join(basedir, '**'), recursive=True)
- if not os.path.isdir(path)]
-
-
-def flat_tar(output, basedir, owner='root', group='root'):
- """Create a flat tar archive (no leading ./) from basedir.
-
- @param output: output tar file to write
- @param basedir: base directory for archive
- @param owner: owner of archive files
- @param group: group archive files belong to
- @return_value: none
- """
- subp.subp(['tar', 'cf', output, '--owner', owner, '--group', group,
- '-C', basedir] + rel_files(basedir), capture=True)
-
-
-def parse_conf_list(entries, valid=None, boolean=False):
- """Parse config in a list of strings in key=value format.
-
- @param entries: list of key=value strings
- @param valid: list of valid keys in result, return None if invalid input
- @param boolean: if true, then interpret all values as booleans
- @return_value: dict of configuration or None if invalid
- """
- res = {key: value.lower() == 'true' if boolean else value
- for key, value in (i.split('=') for i in entries)}
- return res if not valid or all(k in valid for k in res.keys()) else None
-
-
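# parse_conf_list traced on small inputs (sketch):
#
#     parse_conf_list(['a=1', 'b=true'], valid=['a', 'b'])
#     # -> {'a': '1', 'b': 'true'}
#     parse_conf_list(['a=1', 'b=true'], valid=['a', 'b'], boolean=True)
#     # -> {'a': False, 'b': True}   (only the literal 'true' maps to True)
#     parse_conf_list(['c=1'], valid=['a'])
#     # -> None   ('c' is not a valid key)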
-def update_args(args, updates, preserve_old=True):
- """Update cmdline arguments from a dictionary.
-
- @param args: cmdline arguments
- @param updates: dictionary of {arg_name: new_value} mappings
- @param preserve_old: if true, create a deep copy of args before updating
- @return_value: updated cmdline arguments
- """
- args = copy.deepcopy(args) if preserve_old else args
- if updates:
- vars(args).update(updates)
- return args
-
-
-def update_user_data(user_data, updates, dump_to_yaml=True):
- """Update user_data from dictionary.
-
- @param user_data: user data as yaml string or dict
- @param updates: dictionary to merge with user data
- @param dump_to_yaml: return as yaml dumped string if true
- @return_value: updated user data, as yaml string if dump_to_yaml is true
- """
- user_data = (c_util.load_yaml(user_data)
- if isinstance(user_data, str) else copy.deepcopy(user_data))
- user_data.update(updates)
- return (yaml_format(user_data, content_type='cloud-config')
- if dump_to_yaml else user_data)
-
-
-def shell_safe(cmd):
- """Produce string safe shell string.
-
- Create a string that can be passed to:
- set -- <string>
- to produce the same array that cmd represents.
-
-    Internally we rely on getopt's knowledge of how to quote strings
-    safely for the shell. This implementation could be changed to be
-    pure python; it is just a matter of correctly escaping or quoting
-    characters like: ' " ^ & $ ; ( ) ...
-
- @param cmd: command as a list
- """
- out = subprocess.check_output(
- ["getopt", "--shell", "sh", "--options", "", "--", "--"] + list(cmd))
- # out contains ' -- <data>\n'. drop the ' -- ' and the '\n'
- return out.decode()[4:-1]
-
-
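# What shell_safe produces (a sketch; the exact quoting is whatever getopt
# chooses):
#
#     shell_safe(['echo', "it's here"])
#     # -> roughly: echo 'it'\''s here'
#     # so that `set -- echo 'it'\''s here'` rebuilds the original
#     # two-element argument list inside the shell.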
-def shell_pack(cmd):
- """Return a string that can shuffled through 'sh' and execute cmd.
-
- In Python subprocess terms:
- check_output(cmd) == check_output(shell_pack(cmd), shell=True)
-
- @param cmd: list or string of command to pack up
- """
-
- if isinstance(cmd, str):
- cmd = [cmd]
- else:
- cmd = list(cmd)
-
- stuffed = shell_safe(cmd)
- # for whatever reason b64encode returns bytes when it is clearly
- # representable as a string by nature of being base64 encoded.
- b64 = base64.b64encode(stuffed.encode()).decode()
- return 'eval set -- "$(echo %s | base64 --decode)" && exec "$@"' % b64
-
-
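# shell_pack round-trip, mirroring the equivalence stated in its docstring
# (sketch):
#
#     import subprocess
#     packed = shell_pack(['echo', 'hello world'])
#     subprocess.check_output(packed, shell=True)  # -> b'hello world\n'
#     # the base64 layer keeps the payload intact through any further
#     # shell mangling between here and the eventual 'sh' on the target.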
-def shell_quote(cmd):
- if isinstance(cmd, (tuple, list)):
- return ' '.join([shlex.quote(x) for x in cmd])
- return shlex.quote(cmd)
-
-
-class TargetBase(object):
- _tmp_count = 0
-
- def execute(self, command, stdin=None, env=None,
- rcs=None, description=None):
- """Execute command in instance, recording output, error and exit code.
-
- Assumes functional networking and execution as root with the
- target filesystem being available at /.
-
- @param command: the command to execute as root inside the image
- if command is a string, then it will be executed as:
- ['sh', '-c', command]
- @param stdin: bytes content for standard in
- @param env: environment variables
- @param rcs: return codes.
- None (default): non-zero exit code will raise exception.
-            False: any exit code is allowed (no exception raised).
- list of int: any rc not in the list will raise exception.
- @param description: purpose of command
- @return_value: tuple containing stdout data, stderr data, exit code
- """
- if isinstance(command, str):
- command = ['sh', '-c', command]
-
- if rcs is None:
- rcs = (0,)
-
- if description:
- LOG.debug('executing "%s"', description)
- else:
- LOG.debug("executing command: %s", shell_quote(command))
-
- out, err, rc = self._execute(command=command, stdin=stdin, env=env)
-
- # False means accept anything.
- if (rcs is False or rc in rcs):
- return out, err, rc
-
- raise InTargetExecuteError(out, err, rc, command, description)
-
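    # How the rcs contract plays out (sketch; 'target' stands for any
    # concrete TargetBase subclass):
    #
    #     target.execute(['true'])               # ok: rc 0 is in (0,)
    #     target.execute(['false'])              # raises InTargetExecuteError
    #     target.execute(['false'], rcs=[0, 1])  # ok: rc 1 is allowed
    #     target.execute(['false'], rcs=False)   # ok: any rc is accepted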
- def _execute(self, command, stdin=None, env=None):
- """Execute command in inside, return stdout, stderr and exit code.
-
- Assumes functional networking and execution as root with the
- target filesystem being available at /.
-
- @param stdin: bytes content for standard in
- @param env: environment variables
- @return_value: tuple containing stdout data, stderr data, exit code
-
- This is intended to be implemented by the Image or Instance.
- Many callers will use the higher level 'execute'."""
- raise NotImplementedError("_execute must be implemented by subclass.")
-
- def read_data(self, remote_path, decode=False):
- """Read data from instance filesystem.
-
- @param remote_path: path in instance
- @param decode: decode data before returning.
- @return_value: content of remote_path as bytes if 'decode' is False,
- and as string if 'decode' is True.
- """
- # when sh is invoked with '-c', then the first argument is "$0"
- # which is commonly understood as the "program name".
- # 'read_data' is the program name, and 'remote_path' is '$1'
- stdout, _stderr, rc = self._execute(
- ["sh", "-c", 'exec cat "$1"', 'read_data', remote_path])
- if rc != 0:
- raise RuntimeError("Failed to read file '%s'" % remote_path)
-
- if decode:
- return stdout.decode()
- return stdout
-
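    # The 'sh -c' positional-argument idiom used by read_data and
    # write_data, spelled out (sketch):
    #
    #     sh -c 'exec cat "$1"' read_data /etc/hostname
    #     #      ^ script       ^ "$0"    ^ "$1"
    #
    # "$0" only serves as a program name for error messages; the path
    # arrives as "$1", so it needs no shell quoting by the caller.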
- def write_data(self, remote_path, data):
- """Write data to instance filesystem.
-
- @param remote_path: path in instance
- @param data: data to write in bytes
- """
- # when sh is invoked with '-c', then the first argument is "$0"
- # which is commonly understood as the "program name".
- # 'write_data' is the program name, and 'remote_path' is '$1'
- _, _, rc = self._execute(
- ["sh", "-c", 'exec cat >"$1"', 'write_data', remote_path],
- stdin=data)
-
- if rc != 0:
- raise RuntimeError("Failed to write to '%s'" % remote_path)
- return
-
- def pull_file(self, remote_path, local_path):
- """Copy file at 'remote_path', from instance to 'local_path'.
-
- @param remote_path: path on remote instance
- @param local_path: path on local instance
- """
- with open(local_path, 'wb') as fp:
- fp.write(self.read_data(remote_path))
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'.
-
- @param local_path: path on local instance
- @param remote_path: path on remote instance"""
- with open(local_path, "rb") as fp:
- self.write_data(remote_path, data=fp.read())
-
- def run_script(self, script, rcs=None, description=None):
- """Run script in target and return stdout.
-
- @param script: script contents
- @param rcs: allowed return codes from script
- @param description: purpose of script
- @return_value: stdout from script
- """
- # Just write to a file, add execute, run it, then remove it.
- shblob = '; '.join((
- 'set -e',
- 's="$1"',
- 'shift',
- 'cat > "$s"',
- 'trap "rm -f $s" EXIT',
- 'chmod +x "$s"',
- '"$s" "$@"'))
- return self.execute(
- ['sh', '-c', shblob, 'runscript', self.tmpfile()],
- stdin=script, description=description, rcs=rcs)
-
- def tmpfile(self):
- """Get a tmp file in the target.
-
- @return_value: path to new file in target
- """
- path = "/tmp/%s-%04d" % (type(self).__name__, self._tmp_count)
- self._tmp_count += 1
- return path
-
-
-class InTargetExecuteError(subp.ProcessExecutionError):
- """Error type for in target commands that fail."""
-
- default_desc = 'Unexpected error while running command.'
-
- def __init__(self, stdout, stderr, exit_code, cmd, description=None,
- reason=None):
- """Init error and parent error class."""
- super(InTargetExecuteError, self).__init__(
- stdout=stdout, stderr=stderr, exit_code=exit_code,
- cmd=shell_quote(cmd),
- description=description if description else self.default_desc,
- reason=reason)
-
-
-class PlatformError(IOError):
- """Error type for platform errors."""
-
- default_desc = 'unexpected error in platform.'
-
- def __init__(self, operation, description=None):
- """Init error and parent error class."""
- description = description if description else self.default_desc
-
- message = '%s: %s' % (operation, description)
- IOError.__init__(self, message)
-
-
-def mkdtemp(prefix='cloud_test_data'):
- return tempfile.mkdtemp(prefix=prefix)
-
-
-class TempDir(object):
- """Configurable temporary directory like tempfile.TemporaryDirectory."""
-
- def __init__(self, tmpdir=None, preserve=False, prefix='cloud_test_data_'):
- """Initialize.
-
- @param tmpdir: directory to use as tempdir
- @param preserve: if true, always preserve data on exit
- @param prefix: prefix to use for tempfile name
- """
- self.tmpdir = tmpdir
- self.preserve = preserve
- self.prefix = prefix
-
- def __enter__(self):
- """Create tempdir.
-
- @return_value: tempdir path
- """
- if not self.tmpdir:
- self.tmpdir = mkdtemp(prefix=self.prefix)
- LOG.debug('using tmpdir: %s', self.tmpdir)
- return self.tmpdir
-
- def __exit__(self, etype, value, trace):
- """Destroy tempdir if no errors occurred."""
- if etype or self.preserve:
- LOG.info('leaving data in %s', self.tmpdir)
- else:
- shutil.rmtree(self.tmpdir)
-
-# vi: ts=4 expandtab
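
The 'sh -c' calls in read_data() and write_data() above rely on a shell
convention worth spelling out: after the command string, the next argument is
bound to "$0" (the program name) and subsequent arguments to "$1" onward. A
minimal local sketch of the same convention, assuming a POSIX sh and a
readable /etc/hostname:

    import subprocess

    # After 'sh -c CMD', the next argument becomes "$0" (the program name)
    # and the one after it "$1" -- exactly how read_data() passes
    # 'remote_path' into 'exec cat "$1"'.
    out = subprocess.run(
        ["sh", "-c", 'exec cat "$1"', "read_data", "/etc/hostname"],
        capture_output=True, check=True,
    ).stdout
    print(out.decode())
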
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
deleted file mode 100644
index 0295af40..00000000
--- a/tests/cloud_tests/verify.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Verify test results."""
-
-import os
-import unittest
-
-from tests.cloud_tests import (config, LOG, util, testcases)
-
-
-def verify_data(data_dir, platform, os_name, tests):
- """Verify test data is correct.
-
- @param data_dir: top level directory for all tests
-    @param platform: The platform name for this test data (e.g. lxd)
- @param os_name: The operating system under test (xenial, artful, etc.).
- @param tests: list of test names
- @return_value: {<test_name>: {passed: True/False, failures: []}}
- """
- base_dir = os.sep.join((data_dir, platform, os_name))
- runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
- res = {}
- for test_name in tests:
- LOG.debug('verifying test data for %s', test_name)
-
- # get cloudconfig for test
- test_conf = config.load_test_config(test_name)
- test_module = config.name_to_module(test_name)
- cloud_conf = test_conf['cloud_config']
-
- # load script outputs
- data = {'platform': platform, 'os_name': os_name}
- test_dir = os.path.join(base_dir, test_name)
- for script_name in os.listdir(test_dir):
- with open(os.path.join(test_dir, script_name), 'rb') as fp:
- data[script_name] = fp.read()
-
- # get test suite and launch tests
- suite = testcases.get_suite(test_module, data, cloud_conf)
- suite_results = runner.run(suite)
- res[test_name] = {
- 'passed': suite_results.wasSuccessful(),
- 'failures': [{'module': type(test_class).__base__.__module__,
- 'class': type(test_class).__base__.__name__,
- 'function': str(test_class).split()[0],
- 'error': trace.splitlines()[-1],
- 'traceback': trace, }
- for test_class, trace in suite_results.failures]
- }
-
- for failure in res[test_name]['failures']:
- LOG.warning('test case: %s failed %s.%s with: %s',
- test_name, failure['class'], failure['function'],
- failure['error'])
-
- return res
-
-
-def format_test_failures(test_result):
- """Return a human-readable printable format of test failures."""
- if not test_result['failures']:
- return ''
- failure_hdr = ' test failures:'
- failure_fmt = ' * {module}.{class}.{function}\n '
- output = []
- for failure in test_result['failures']:
- if not output:
- output = [failure_hdr]
- msg = failure_fmt.format(**failure)
- if failure.get('error'):
- msg += failure['error']
- else:
- msg += failure.get('traceback', '')
- output.append(msg)
- return '\n'.join(output)
-
-
-def format_results(res):
- """Return human-readable results as a string"""
- platform_hdr = 'Platform: {platform}'
- distro_hdr = ' Distro: {distro}'
- distro_summary_fmt = (
- ' test modules passed:{passed} tests failed:{failed}')
- output = ['']
- counts = {}
- for platform, platform_data in res.items():
- output.append(platform_hdr.format(platform=platform))
- counts[platform] = {}
- for distro, distro_data in platform_data.items():
- distro_failure_output = []
- output.append(distro_hdr.format(distro=distro))
- counts[platform][distro] = {'passed': 0, 'failed': 0}
- for _, test_result in distro_data.items():
- if test_result['passed']:
- counts[platform][distro]['passed'] += 1
- else:
- counts[platform][distro]['failed'] += len(
- test_result['failures'])
- failure_output = format_test_failures(test_result)
- if failure_output:
- distro_failure_output.append(failure_output)
- output.append(
- distro_summary_fmt.format(**counts[platform][distro]))
- if distro_failure_output:
- output.extend(distro_failure_output)
- return '\n'.join(output)
-
-
-def verify(args):
- """Verify test data.
-
- @param args: directory of test data
- @return_value: 0 for success, or number of failed tests
- """
- failed = 0
- res = {}
-
- # find test data
- tests = util.list_test_data(args.data_dir)
-
- for platform in tests.keys():
- res[platform] = {}
- for os_name in tests[platform].keys():
- test_name = "platform='{}', os='{}'".format(platform, os_name)
- LOG.info('test: %s verifying test data', test_name)
-
- # run test
- res[platform][os_name] = verify_data(
- args.data_dir, platform, os_name,
- tests[platform][os_name])
-
- # handle results
- fail_list = [k for k, v in res[platform][os_name].items()
- if not v.get('passed')]
- if len(fail_list) == 0:
- LOG.info('test: %s passed all tests', test_name)
- else:
- LOG.warning('test: %s failed %s tests', test_name,
- len(fail_list))
- failed += len(fail_list)
-
- # dump results
- LOG.debug('\n---- Verify summarized results:\n%s', format_results(res))
- if args.result:
- util.merge_results({'verify': res}, args.result)
-
- return failed
-
-# vi: ts=4 expandtab
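
A note on the shape of suite_results above: unittest's runner returns a
TestResult whose 'failures' attribute is a list of (test case, traceback
string) pairs, which is exactly what verify_data() unpacks into its result
dictionary. A self-contained illustration:

    import unittest

    class _Example(unittest.TestCase):
        def test_fails(self):
            self.assertEqual(1, 2)

    # result.failures holds (test_case, traceback_string) pairs, like the
    # ones verify_data() turns into its per-test failure records.
    suite = unittest.TestLoader().loadTestsFromTestCase(_Example)
    result = unittest.TextTestRunner(verbosity=0).run(suite)
    for test_case, trace in result.failures:
        print(type(test_case).__name__, trace.splitlines()[-1])
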
diff --git a/tests/configs/sample1.yaml b/tests/configs/sample1.yaml
deleted file mode 100644
index ae935cc0..00000000
--- a/tests/configs/sample1.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-#cloud-config
-#apt_update: false
-#apt_upgrade: true
-packages: [ bzr, pastebinit, ubuntu-dev-tools, ccache, bzr-builddeb, vim-nox, git-core, lftp ]
-
-#disable_root: False
-
-# mounts:
-# - [ ephemeral0, /mnt ]
-# - [ swap, none, swap, sw, 0, 0 ]
-
-ssh_import_id: [smoser ]
-
-#!/bin/sh
-
-output: {all: '| tee -a /var/log/cloud-init-output.log'}
-
-sm_misc:
- - &user_setup |
- set -x; exec > ~/user_setup.log 2>&1
- echo "starting at $(date -R)"
- echo "set -o vi" >> ~/.bashrc
- cat >> ~/.profile <<"EOF"
- export EDITOR=vi
- export DEB_BUILD_OPTIONS=parallel=4
- export PATH=/usr/lib/ccache:$PATH
- EOF
-
- mkdir ~/bin
- chmod 755 ~/bin
- cat > ~/bin/mdebuild <<"EOF"
- #!/bin/sh
- exec debuild --prepend-path /usr/lib/ccache "$@"
- EOF
- chmod 755 ~/bin/*
-
- #byobu-launcher-install
- byobu-ctrl-a screen 2>&1 || :
-
- echo "pinging 8.8.8.8"
- ping -c 4 8.8.8.8
-
-runcmd:
- - [ sudo, -Hu, ubuntu, sh, -c, '[ -e /var/log/cloud-init.log ] || exit 0; grep "cloud-init.*running" /var/log/cloud-init.log > ~/runcmd.log' ]
- - [ sudo, -Hu, ubuntu, sh, -c, 'read up sleep < /proc/uptime; echo $(date): runcmd up at $up | tee -a ~/runcmd.log' ]
- - [ sudo, -Hu, ubuntu, sh, -c, *user_setup ]
-
-
-byobu_by_default: user
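
The deleted sample depends on YAML anchors: '&user_setup' names the shell
blob under sm_misc, and '*user_setup' reuses it verbatim inside runcmd. A
quick PyYAML check of that mechanism:

    import textwrap

    import yaml

    # '&user_setup' defines an anchor; '*user_setup' is an alias resolving
    # to the same scalar, so both entries parse to an identical string.
    doc = textwrap.dedent("""\
        sm_misc:
          - &user_setup |
            echo hello
        runcmd:
          - [sh, -c, *user_setup]
        """)
    parsed = yaml.safe_load(doc)
    assert parsed["runcmd"][0][2] == parsed["sm_misc"][0]
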
diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py
index 3c1f9347..f3702a2e 100644
--- a/tests/integration_tests/bugs/test_gh632.py
+++ b/tests/integration_tests/bugs/test_gh632.py
@@ -3,16 +3,15 @@
Verify that if cloud-init is using DataSourceRbxCloud, there is
no traceback if the metadata disk cannot be found.
"""
-
import pytest
from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
# With some datasource hacking, we can run this on a NoCloud instance
@pytest.mark.lxd_container
@pytest.mark.lxd_vm
-@pytest.mark.sru_2020_11
def test_datasource_rbx_no_stacktrace(client: IntegrationInstance):
client.write_to_file(
'/etc/cloud/cloud.cfg.d/90_dpkg.cfg',
@@ -26,8 +25,7 @@ def test_datasource_rbx_no_stacktrace(client: IntegrationInstance):
client.restart()
log = client.read_from_file('/var/log/cloud-init.log')
- assert 'WARNING' not in log
- assert 'Traceback' not in log
+ verify_clean_log(log)
assert 'Failed to load metadata and userdata' not in log
assert ("Getting data from <class 'cloudinit.sources.DataSourceRbxCloud."
"DataSourceRbxCloud'> failed") not in log
diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py
index 838efca6..73c03451 100644
--- a/tests/integration_tests/bugs/test_gh868.py
+++ b/tests/integration_tests/bugs/test_gh868.py
@@ -1,6 +1,8 @@
"""Ensure no Traceback when 'chef_license' is set"""
import pytest
+
from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
USERDATA = """\
@@ -17,4 +19,4 @@ chef:
@pytest.mark.user_data(USERDATA)
def test_chef_license(client: IntegrationInstance):
log = client.read_from_file('/var/log/cloud-init.log')
- assert 'Traceback' not in log
+ verify_clean_log(log)
diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py
index 68b96b1d..27d41c2b 100644
--- a/tests/integration_tests/bugs/test_lp1813396.py
+++ b/tests/integration_tests/bugs/test_lp1813396.py
@@ -29,6 +29,5 @@ def test_gpg_no_tty(client: IntegrationInstance):
"'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] "
"with allowed return codes [0] (shell=False, capture=True)",
"Imported key 'E4D304DF' from keyserver 'keyserver.ubuntu.com'",
- "finish: modules-config/config-apt-configure: SUCCESS",
]
verify_ordered_items_in_text(to_verify, log)
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
index 660d2a2a..732f2179 100644
--- a/tests/integration_tests/bugs/test_lp1835584.py
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -59,6 +59,9 @@ def _check_iid_insensitive_across_kernel_upgrade(
result = instance.execute("apt-get install linux-azure --assume-yes")
if not result.ok:
pytest.fail("Unable to install linux-azure kernel: {}".format(result))
+ # Remove ubuntu-azure-fips metapkg which mandates FIPS-flavour kernel
+ result = instance.execute("ua disable fips --assume-yes")
+ assert result.ok, "Unable to disable fips: {}".format(result)
instance.restart()
new_kernel = instance.execute("uname -r").strip()
assert orig_kernel != new_kernel
diff --git a/tests/integration_tests/bugs/test_lp1886531.py b/tests/integration_tests/bugs/test_lp1886531.py
index 058ea8bb..6dd61222 100644
--- a/tests/integration_tests/bugs/test_lp1886531.py
+++ b/tests/integration_tests/bugs/test_lp1886531.py
@@ -11,6 +11,8 @@ https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1886531
"""
import pytest
+from tests.integration_tests.util import verify_clean_log
+
USER_DATA = """\
#cloud-config
@@ -24,4 +26,4 @@ class TestLp1886531:
@pytest.mark.user_data(USER_DATA)
def test_lp1886531(self, client):
log_content = client.read_from_file("/var/log/cloud-init.log")
- assert "WARNING" not in log_content
+ verify_clean_log(log_content)
diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py
index bde93d06..909bc690 100644
--- a/tests/integration_tests/bugs/test_lp1898997.py
+++ b/tests/integration_tests/bugs/test_lp1898997.py
@@ -10,7 +10,9 @@ network configuration, and confirms that the bridge can be used to ping the
default gateway.
"""
import pytest
+
from tests.integration_tests import random_mac_address
+from tests.integration_tests.util import verify_clean_log
MAC_ADDRESS = random_mac_address()
@@ -59,7 +61,7 @@ class TestInterfaceListingWithOpenvSwitch:
cloudinit_output = client.read_from_file("/var/log/cloud-init.log")
# Confirm that the network configuration was applied successfully
- assert "WARN" not in cloudinit_output
+ verify_clean_log(cloudinit_output)
# Confirm that the applied network config created the OVS bridge
assert "ovs-br" in client.execute("ip addr")
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index f2362b5d..dee2adff 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -1,7 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
from abc import ABC, abstractmethod
+import datetime
import logging
import os.path
+import random
+import string
from uuid import UUID
from pycloudlib import (
@@ -28,7 +31,7 @@ from tests.integration_tests.instances import (
from tests.integration_tests.util import emit_dots_on_travis
try:
- from typing import Optional
+ from typing import Optional # noqa: F401
except ImportError:
pass
@@ -309,8 +312,12 @@ class _LxdIntegrationCloud(IntegrationCloud):
except KeyError:
profile_list = self._get_or_set_profile_list(release)
+ prefix = datetime.datetime.utcnow().strftime("cloudinit-%m%d-%H%M%S")
+ default_name = prefix + "".join(
+ random.choices(string.ascii_lowercase + string.digits, k=8)
+ )
pycloudlib_instance = self.cloud_instance.init(
- launch_kwargs.pop('name', None),
+ launch_kwargs.pop('name', default_name),
release,
profile_list=profile_list,
**launch_kwargs
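
One detail of the change above: instead of passing None through to
pycloudlib, a default instance name is now generated up front, combining a
sortable launch timestamp with eight random alphanumerics. The scheme in
isolation:

    import datetime
    import random
    import string

    # The timestamp prefix keeps names sortable by launch time; the random
    # suffix avoids collisions between instances launched in the same second.
    prefix = datetime.datetime.utcnow().strftime("cloudinit-%m%d-%H%M%S")
    name = prefix + "".join(
        random.choices(string.ascii_lowercase + string.digits, k=8))
    print(name)  # e.g. cloudinit-1102-180749k3x9q0a7
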
diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py
new file mode 100644
index 00000000..be76e179
--- /dev/null
+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
@@ -0,0 +1,62 @@
+import json
+import pytest
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+
+def _customize_environment(client: IntegrationInstance):
+ client.write_to_file(
+ '/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg',
+ 'datasource_list: [LXD]\n',
+ )
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+
+# This test runs on LXD containers and VMs, where the LXD socket is
+# available for datasource discovery
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.ubuntu # Because netplan
+def test_lxd_datasource_discovery(client: IntegrationInstance):
+ """Test that DataSourceLXD is detected instead of NoCloud."""
+    _customize_environment(client)
+ nic_dev = "enp5s0" if client.settings.PLATFORM == "lxd_vm" else "eth0"
+ result = client.execute('cloud-init status --long')
+ if not result.ok:
+        raise AssertionError('cloud-init failed:\n{}'.format(result.stderr))
+ if "DataSourceLXD" not in result.stdout:
+ raise AssertionError(
+            'cloud-init did not discover DataSourceLXD: %s' % result.stdout
+ )
+ netplan_yaml = client.execute('cat /etc/netplan/50-cloud-init.yaml')
+ netplan_cfg = yaml.safe_load(netplan_yaml)
+ assert {
+ 'network': {'ethernets': {nic_dev: {'dhcp4': True}}, 'version': 2}
+ } == netplan_cfg
+ log = client.read_from_file('/var/log/cloud-init.log')
+ verify_clean_log(log)
+ result = client.execute('cloud-id')
+ if result.stdout != "lxd":
+ raise AssertionError(
+            "cloud-id didn't report lxd. Result: %s" % result.stdout
+ )
+ # Validate config instance data represented
+ data = json.loads(client.read_from_file(
+ '/run/cloud-init/instance-data.json')
+ )
+ v1 = data["v1"]
+ ds_cfg = data["ds"]
+ assert "lxd" == v1["platform"]
+ assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"]
+ ds_cfg = json.loads(client.execute('cloud-init query ds').stdout)
+ assert ["config", "meta_data"] == sorted(list(ds_cfg["1.0"].keys()))
+ assert ["user.meta_data"] == list(ds_cfg["1.0"]["config"].keys())
+ assert {"public-keys": v1["public_ssh_keys"][0]} == (
+ yaml.safe_load(ds_cfg["1.0"]["config"]["user.meta_data"])
+ )
+ assert (
+ "#cloud-config\ninstance-id" in ds_cfg["1.0"]["meta_data"]
+ )
diff --git a/tests/integration_tests/datasources/test_network_dependency.py b/tests/integration_tests/datasources/test_network_dependency.py
new file mode 100644
index 00000000..24e71f9d
--- /dev/null
+++ b/tests/integration_tests/datasources/test_network_dependency.py
@@ -0,0 +1,32 @@
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _customize_environment(client: IntegrationInstance):
+ # Insert our "disable_network_activation" file here
+ client.write_to_file(
+ '/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg',
+ 'disable_network_activation: true\n',
+ )
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+
+# This test should be able to work on any cloud whose datasource specifies
+# a NETWORK dependency
+@pytest.mark.gce
+@pytest.mark.ubuntu # Because netplan
+def test_network_activation_disabled(client: IntegrationInstance):
+ """Test that the network is not activated during init mode."""
+    _customize_environment(client)
+ result = client.execute('systemctl status google-guest-agent.service')
+ if not result.ok:
+ raise AssertionError(
+            'google-guest-agent is not active:\n%s' % result.stdout)
+ log = client.read_from_file('/var/log/cloud-init.log')
+
+ assert "Running command ['netplan', 'apply']" not in log
+
+ assert 'Not bringing up newly configured network interfaces' in log
+ assert 'Bringing up newly configured network interfaces' not in log
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 055ec758..8f66bf43 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -9,11 +9,14 @@ from pycloudlib.instance import BaseInstance
from pycloudlib.result import Result
from tests.integration_tests import integration_settings
+from tests.integration_tests.util import retry
try:
from typing import TYPE_CHECKING
if TYPE_CHECKING:
- from tests.integration_tests.clouds import IntegrationCloud
+ from tests.integration_tests.clouds import ( # noqa: F401
+ IntegrationCloud
+ )
except ImportError:
pass
@@ -140,26 +143,31 @@ class IntegrationInstance:
snapshot_id = self.snapshot()
self.cloud.snapshot_id = snapshot_id
+ # assert with retry because we can compete with apt already running in the
+ # background and get: E: Could not get lock /var/lib/apt/lists/lock - open
+ # (11: Resource temporarily unavailable)
+
+ @retry(tries=30, delay=1)
def install_proposed_image(self):
log.info('Installing proposed image')
- remote_script = (
+ assert self.execute(
'echo deb "http://archive.ubuntu.com/ubuntu '
- '$(lsb_release -sc)-proposed main" | '
- 'tee /etc/apt/sources.list.d/proposed.list\n'
- 'apt-get update -q\n'
- 'apt-get install -qy cloud-init'
- )
- self.execute(remote_script)
+ '$(lsb_release -sc)-proposed main" >> '
+ '/etc/apt/sources.list.d/proposed.list'
+ ).ok
+ assert self.execute('apt-get update -q').ok
+ assert self.execute('apt-get install -qy cloud-init').ok
+ @retry(tries=30, delay=1)
def install_ppa(self):
log.info('Installing PPA')
- remote_script = (
- 'add-apt-repository {repo} -y && '
- 'apt-get update -q && '
- 'apt-get install -qy cloud-init'
- ).format(repo=self.settings.CLOUD_INIT_SOURCE)
- self.execute(remote_script)
+ assert self.execute('add-apt-repository {} -y'.format(
+ self.settings.CLOUD_INIT_SOURCE)
+ ).ok
+ assert self.execute('apt-get update -q').ok
+ assert self.execute('apt-get install -qy cloud-init').ok
+ @retry(tries=30, delay=1)
def install_deb(self):
log.info('Installing deb package')
deb_path = integration_settings.CLOUD_INIT_SOURCE
@@ -168,13 +176,13 @@ class IntegrationInstance:
self.push_file(
local_path=integration_settings.CLOUD_INIT_SOURCE,
remote_path=remote_path)
- remote_script = 'dpkg -i {path}'.format(path=remote_path)
- self.execute(remote_script)
+ assert self.execute('dpkg -i {path}'.format(path=remote_path)).ok
+ @retry(tries=30, delay=1)
def upgrade_cloud_init(self):
log.info('Upgrading cloud-init to latest version in archive')
- self.execute("apt-get update -q")
- self.execute("apt-get install -qy cloud-init")
+ assert self.execute("apt-get update -q").ok
+ assert self.execute("apt-get install -qy cloud-init").ok
def __enter__(self):
return self
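
The apt-lock comment above explains why install_proposed_image() and its
siblings are now wrapped in @retry. The helper is imported from
tests.integration_tests.util; a plausible sketch of its semantics, inferred
only from the call sites here (tries/delay keywords, re-running until the
asserts pass):

    import functools
    import time

    def retry(tries=30, delay=1):
        """Re-run the wrapped function until it stops raising, sleeping
        'delay' seconds between attempts and re-raising on the last one."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for attempt in range(tries):
                    try:
                        return func(*args, **kwargs)
                    except AssertionError:
                        if attempt == tries - 1:
                            raise
                        time.sleep(delay)
            return wrapper
        return decorator
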
diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py
index 54711fc0..2c388047 100644
--- a/tests/integration_tests/modules/test_apt.py
+++ b/tests/integration_tests/modules/test_apt.py
@@ -1,9 +1,11 @@
"""Series of integration tests covering apt functionality."""
import re
-from tests.integration_tests.clouds import ImageSpecification
import pytest
+from cloudinit.config import cc_apt_configure
+from cloudinit import gpg
+from tests.integration_tests.clouds import ImageSpecification
from tests.integration_tests.instances import IntegrationInstance
@@ -43,6 +45,13 @@ apt:
keyid: 441614D8
keyserver: keyserver.ubuntu.com
source: "ppa:simplestreams-dev/trunk"
+ test_signed_by:
+ keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11
+ keyserver: keyserver.ubuntu.com
+ source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/stable/ubuntu $RELEASE main"
+ test_bad_key:
+ key: ""
+ source: "deb $MIRROR $RELEASE main"
test_key:
source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
key: |
@@ -91,12 +100,27 @@ TEST_KEYSERVER_KEY = "7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937"
TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8"
TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF"
+TEST_SIGNED_BY_KEY = "A2EB 2DEC 0BD7 519B 7B38 BE38 376A 290E C806 8B11"
@pytest.mark.ci
@pytest.mark.ubuntu
@pytest.mark.user_data(USER_DATA)
class TestApt:
+ def get_keys(self, class_client: IntegrationInstance):
+ """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg
+        in human-readable format. Mimics the output of 'apt-key finger'.
+ """
+ list_cmd = ' '.join(gpg.GPG_LIST) + ' '
+ keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS)
+ print(keys)
+ files = class_client.execute(
+ 'ls ' + cc_apt_configure.APT_TRUSTED_GPG_DIR)
+ for file in files.split():
+ path = cc_apt_configure.APT_TRUSTED_GPG_DIR + file
+ keys += class_client.execute(list_cmd + path) or ''
+ return keys
+
def test_sources_list(self, class_client: IntegrationInstance):
"""Integration test for the apt module's `sources_list` functionality.
@@ -152,8 +176,33 @@ class TestApt:
'http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu'
) in ppa_path_contents
- keys = class_client.execute('apt-key finger')
- assert TEST_PPA_KEY in keys
+ assert TEST_PPA_KEY in self.get_keys(class_client)
+
+ def test_signed_by(self, class_client: IntegrationInstance):
+ """Test the apt signed-by functionality.
+ """
+ release = ImageSpecification.from_os_image().release
+ source = (
+ "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_signed_by.gpg] "
+ "http://ppa.launchpad.net/juju/stable/ubuntu"
+ " {} main".format(release))
+ print(class_client.execute('cat /var/log/cloud-init.log'))
+ path_contents = class_client.read_from_file(
+ '/etc/apt/sources.list.d/test_signed_by.list')
+ assert path_contents == source
+
+ key = class_client.execute(
+ 'gpg --no-default-keyring --with-fingerprint --list-keys '
+ '--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg')
+
+ assert TEST_SIGNED_BY_KEY in key
+
+ def test_bad_key(self, class_client: IntegrationInstance):
+        """Test that an empty apt key is not written to disk."""
+ with pytest.raises(OSError):
+ class_client.read_from_file(
+ '/etc/apt/trusted.list.d/test_bad_key.gpg')
def test_key(self, class_client: IntegrationInstance):
"""Test the apt key functionality.
@@ -168,9 +217,7 @@ class TestApt:
assert (
'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu'
) in test_archive_contents
-
- keys = class_client.execute('apt-key finger')
- assert TEST_KEY in keys
+ assert TEST_KEY in self.get_keys(class_client)
def test_keyserver(self, class_client: IntegrationInstance):
"""Test the apt keyserver functionality.
@@ -186,8 +233,7 @@ class TestApt:
'http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu'
) in test_keyserver_contents
- keys = class_client.execute('apt-key finger')
- assert TEST_KEYSERVER_KEY in keys
+ assert TEST_KEYSERVER_KEY in self.get_keys(class_client)
def test_os_pipelining(self, class_client: IntegrationInstance):
"""Test 'os' settings does not write apt config file.
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 27f3c074..9cd1648a 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -8,14 +8,15 @@ here.
import json
import pytest
import re
-from datetime import date
from tests.integration_tests.clouds import ImageSpecification
from tests.integration_tests.instances import IntegrationInstance
-from tests.integration_tests.util import verify_ordered_items_in_text
+from tests.integration_tests.util import (
+ verify_clean_log,
+ verify_ordered_items_in_text,
+)
USER_DATA = """\
-## template: jinja
#cloud-config
apt:
primary:
@@ -33,8 +34,7 @@ locale_configfile: /etc/default/locale
ntp:
servers: ['ntp.ubuntu.com']
runcmd:
- - echo {{ds.meta_data.local_hostname}} > /var/tmp/runcmd_output
- - echo {{merged_cfg.def_log_file}} >> /var/tmp/runcmd_output
+ - echo 'hello world' > /var/tmp/runcmd_output
"""
@@ -44,11 +44,17 @@ class TestCombined:
def test_final_message(self, class_client: IntegrationInstance):
"""Test that final_message module works as expected.
- Also tests LP 1511485: final_message is silent
+ Also tests LP 1511485: final_message is silent.
+
+        It's possible that, if this test runs within a minute or so of
+        midnight, we'll see a failure because the day in the logs differs
+        from the day computed in the test definition.
"""
client = class_client
log = client.read_from_file('/var/log/cloud-init.log')
- today = date.today().strftime('%a, %d %b %Y')
+ # Get date on host rather than locally as our host could be in a
+ # wildly different timezone (or more likely recording UTC)
+ today = client.execute('date "+%a, %d %b %Y"')
expected = (
'This is my final message!\n'
r'\d+\.\d+.*\n'
@@ -96,21 +102,10 @@ class TestCombined:
'en_US.UTF-8'
], locale_gen)
- def test_runcmd_with_variable_substitution(
- self, class_client: IntegrationInstance
- ):
- """Test runcmd, while including jinja substitution.
-
-        Ensure we can also substitute variables from instance-data-sensitive
- LP: #1931392
- """
+ def test_runcmd(self, class_client: IntegrationInstance):
+ """Test runcmd works as expected"""
client = class_client
- expected = [
- client.execute('hostname').stdout.strip(),
- '/var/log/cloud-init.log',
- ]
- output = client.read_from_file('/var/tmp/runcmd_output')
- verify_ordered_items_in_text(expected, output)
+ assert 'hello world' == client.read_from_file('/var/tmp/runcmd_output')
def test_no_problems(self, class_client: IntegrationInstance):
"""Test no errors, warnings, or tracebacks"""
@@ -124,8 +119,7 @@ class TestCombined:
assert result_json['errors'] == []
log = client.read_from_file('/var/log/cloud-init.log')
- assert 'WARN' not in log
- assert 'Traceback' not in log
+ verify_clean_log(log)
def _check_common_metadata(self, data):
assert data['base64_encoded_keys'] == []
@@ -171,8 +165,10 @@ class TestCombined:
v1_data = data['v1']
assert v1_data['cloud_name'] == 'unknown'
assert v1_data['platform'] == 'lxd'
- assert v1_data['subplatform'] == (
- 'seed-dir (/var/lib/cloud/seed/nocloud-net)')
+ assert any([
+ '/var/lib/cloud/seed/nocloud-net' in v1_data['subplatform'],
+ '/dev/sr0' in v1_data['subplatform']
+ ])
assert v1_data['availability_zone'] is None
assert v1_data['instance_id'] == client.instance.name
assert v1_data['local_hostname'] == client.instance.name
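
The midnight caveat in test_final_message boils down to strftime: the
expected string embeds the instance's current day, so both sides of the
comparison must use the same clock. The format in isolation:

    from datetime import date

    # '%a, %d %b %Y' is the layout matched against the final message log
    # line, e.g. 'Wed, 03 Nov 2021'; computing it locally risks a mismatch
    # when host and instance disagree on the date.
    print(date.today().strftime('%a, %d %b %Y'))
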
diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py
index 1fc96c52..9c9edc46 100644
--- a/tests/integration_tests/modules/test_disk_setup.py
+++ b/tests/integration_tests/modules/test_disk_setup.py
@@ -6,6 +6,7 @@ from pycloudlib.lxd.instance import LXDInstance
from cloudinit.subp import subp
from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4())
@@ -59,8 +60,7 @@ class TestDeviceAliases:
) in log
assert 'changed my_alias.1 => /dev/sdb1' in log
assert 'changed my_alias.2 => /dev/sdb2' in log
- assert 'WARN' not in log
- assert 'Traceback' not in log
+ verify_clean_log(log)
lsblk = json.loads(client.execute('lsblk --json'))
sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
@@ -120,8 +120,7 @@ class TestPartProbeAvailability:
"""
def _verify_first_disk_setup(self, client, log):
- assert 'Traceback' not in log
- assert 'WARN' not in log
+ verify_clean_log(log)
lsblk = json.loads(client.execute('lsblk --json'))
sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
assert len(sdb['children']) == 2
@@ -167,8 +166,7 @@ class TestPartProbeAvailability:
client.restart()
# Assert new setup works as expected
- assert 'Traceback' not in log
- assert 'WARN' not in log
+ verify_clean_log(log)
lsblk = json.loads(client.execute('lsblk --json'))
sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py
index a42d1c8c..f5abc86f 100644
--- a/tests/integration_tests/modules/test_hotplug.py
+++ b/tests/integration_tests/modules/test_hotplug.py
@@ -40,15 +40,22 @@ def _get_ip_addr(client):
@pytest.mark.openstack
+# On Bionic, we traceback when attempting to detect the hotplugged
+# device in the updated metadata. This is because Bionic is specifically
+# configured not to provide network metadata.
+@pytest.mark.not_bionic
@pytest.mark.user_data(USER_DATA)
def test_hotplug_add_remove(client: IntegrationInstance):
ips_before = _get_ip_addr(client)
log = client.read_from_file('/var/log/cloud-init.log')
assert 'Exiting hotplug handler' not in log
+ assert client.execute(
+ 'test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules'
+ ).ok
# Add new NIC
added_ip = client.instance.add_network_interface()
- _wait_till_hotplug_complete(client, expected_runs=2)
+ _wait_till_hotplug_complete(client, expected_runs=1)
ips_after_add = _get_ip_addr(client)
new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0]
@@ -63,7 +70,7 @@ def test_hotplug_add_remove(client: IntegrationInstance):
# Remove new NIC
client.instance.remove_network_interface(added_ip)
- _wait_till_hotplug_complete(client, expected_runs=4)
+ _wait_till_hotplug_complete(client, expected_runs=2)
ips_after_remove = _get_ip_addr(client)
assert len(ips_after_remove) == len(ips_before)
assert added_ip not in [ip.ip4 for ip in ips_after_remove]
@@ -82,12 +89,14 @@ def test_no_hotplug_in_userdata(client: IntegrationInstance):
ips_before = _get_ip_addr(client)
log = client.read_from_file('/var/log/cloud-init.log')
assert 'Exiting hotplug handler' not in log
+ assert client.execute(
+ 'test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules'
+ ).failed
# Add new NIC
client.instance.add_network_interface()
- _wait_till_hotplug_complete(client)
log = client.read_from_file('/var/log/cloud-init.log')
- assert "Event Denied: scopes=['network'] EventType=hotplug" in log
+ assert 'hotplug-hook' not in log
ips_after_add = _get_ip_addr(client)
if len(ips_after_add) == len(ips_before) + 1:
diff --git a/tests/integration_tests/modules/test_jinja_templating.py b/tests/integration_tests/modules/test_jinja_templating.py
new file mode 100644
index 00000000..35b8ee2d
--- /dev/null
+++ b/tests/integration_tests/modules/test_jinja_templating.py
@@ -0,0 +1,30 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+
+USER_DATA = """\
+## template: jinja
+#cloud-config
+runcmd:
+ - echo {{v1.local_hostname}} > /var/tmp/runcmd_output
+ - echo {{merged_cfg._doc}} >> /var/tmp/runcmd_output
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_runcmd_with_variable_substitution(client: IntegrationInstance):
+ """Test jinja substitution.
+
+ Ensure we can also substitute variables from instance-data-sensitive
+ LP: #1931392
+ """
+ expected = [
+ client.execute('hostname').stdout.strip(),
+ ('Merged cloud-init system config from /etc/cloud/cloud.cfg and '
+ '/etc/cloud/cloud.cfg.d/')
+ ]
+ output = client.read_from_file('/var/tmp/runcmd_output')
+ verify_ordered_items_in_text(expected, output)
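
Under the hood this works because cloud-init renders '## template: jinja'
user-data against instance-data before processing it as cloud-config. A toy
rendering with plain Jinja2 (the fake v1 dict stands in for what cloud-init
actually supplies):

    import jinja2

    # Jinja2 attribute access falls back to item lookup, so a plain dict is
    # enough to stand in for the instance-data 'v1' namespace here.
    template = "echo {{v1.local_hostname}} > /var/tmp/runcmd_output"
    rendered = jinja2.Template(template).render(
        v1={"local_hostname": "my-host"})
    print(rendered)  # echo my-host > /var/tmp/runcmd_output
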
diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py
index cbf11179..65dce3c7 100644
--- a/tests/integration_tests/modules/test_lxd_bridge.py
+++ b/tests/integration_tests/modules/test_lxd_bridge.py
@@ -6,6 +6,8 @@
import pytest
import yaml
+from tests.integration_tests.util import verify_clean_log
+
USER_DATA = """\
#cloud-config
@@ -38,7 +40,7 @@ class TestLxdBridge:
def test_bridge(self, class_client):
"""Check that the given bridge is configured"""
cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log")
- assert "WARN" not in cloud_init_log
+ verify_clean_log(cloud_init_log)
# The bridge should exist
assert class_client.execute("ip addr show lxdbr0")
diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py
index 7a799139..59241faa 100644
--- a/tests/integration_tests/modules/test_ntp_servers.py
+++ b/tests/integration_tests/modules/test_ntp_servers.py
@@ -78,6 +78,8 @@ CHRONY_DATA = """\
ntp:
enabled: true
ntp_client: chrony
+ servers:
+ - 172.16.15.14
"""
@@ -89,7 +91,7 @@ def test_chrony(client: IntegrationInstance):
else:
chrony_conf = '/etc/chrony/chrony.conf'
contents = client.read_from_file(chrony_conf)
- assert '.pool.ntp.org' in contents
+ assert 'server 172.16.15.14' in contents
TIMESYNCD_DATA = """\
@@ -97,6 +99,8 @@ TIMESYNCD_DATA = """\
ntp:
enabled: true
ntp_client: systemd-timesyncd
+ servers:
+ - 172.16.15.14
"""
@@ -106,7 +110,7 @@ def test_timesyncd(client: IntegrationInstance):
contents = client.read_from_file(
'/etc/systemd/timesyncd.conf.d/cloud-init.conf'
)
- assert '.pool.ntp.org' in contents
+ assert 'NTP=172.16.15.14' in contents
EMPTY_NTP = """\
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
index d7cf91a5..ac9db19d 100644
--- a/tests/integration_tests/modules/test_set_password.py
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -13,6 +13,8 @@ import crypt
import pytest
import yaml
+from tests.integration_tests.util import retry
+
COMMON_USER_DATA = """\
#cloud-config
@@ -129,6 +131,7 @@ class Mixin:
assert "dick:" not in cloud_init_output
assert "harry:" not in cloud_init_output
+ @retry(tries=30, delay=1)
def test_random_passwords_emitted_to_serial_console(self, class_client):
"""We should emit passwords to the serial console. (LP: #1918303)"""
try:
@@ -137,6 +140,15 @@ class Mixin:
# Assume that an exception here means that we can't use the console
# log
pytest.skip("NotImplementedError when requesting console log")
+ return
+ if console_log.lower() == 'no console output':
+ # This test retries because we might not have the full console log
+ # on the first fetch. However, if we have no console output
+ # at all, we don't want to keep retrying as that would trigger
+ # another 5 minute wait on the pycloudlib side, which could
+            # another 5-minute wait on the pycloudlib side, which could
+            # leave us waiting for a couple of hours
+ return
assert "dick:" in console_log
assert "harry:" in console_log
diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py
index 5c720578..b39454e6 100644
--- a/tests/integration_tests/modules/test_ssh_keysfile.py
+++ b/tests/integration_tests/modules/test_ssh_keysfile.py
@@ -38,9 +38,15 @@ def common_verify(client, expected_keys):
# Ensure key is in the key file
contents = client.read_from_file(filename)
if user in ['ubuntu', 'root']:
- # Our personal public key gets added by pycloudlib
lines = contents.split('\n')
- assert len(lines) == 2
+ if user == 'root':
+ # Our personal public key gets added by pycloudlib in
+ # addition to the default `ssh_authorized_keys`
+ assert len(lines) == 2
+ else:
+ # Clouds will insert the keys we've added to our accounts
+ # or for our launches
+ assert len(lines) >= 2
assert keys.public_key.strip() in contents
else:
assert contents.strip() == keys.public_key.strip()
diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py
index a45cad72..fffa0746 100644
--- a/tests/integration_tests/modules/test_user_events.py
+++ b/tests/integration_tests/modules/test_user_events.py
@@ -31,9 +31,10 @@ def _add_dummy_bridge_to_netplan(client: IntegrationInstance):
@pytest.mark.gce
@pytest.mark.oci
@pytest.mark.openstack
-@pytest.mark.not_xenial
def test_boot_event_disabled_by_default(client: IntegrationInstance):
log = client.read_from_file('/var/log/cloud-init.log')
+ if 'network config is disabled' in log:
+ pytest.skip("network config disabled. Test doesn't apply")
assert 'Applying network configuration' in log
assert 'dummy0' not in client.execute('ls /sys/class/net')
@@ -43,6 +44,12 @@ def test_boot_event_disabled_by_default(client: IntegrationInstance):
client.restart()
log2 = client.read_from_file('/var/log/cloud-init.log')
+ if 'cache invalid in datasource' in log2:
+ # Invalid cache will get cleared, meaning we'll create a new
+ # "instance" and apply networking config, so events aren't
+ # really relevant here
+ pytest.skip("Test only valid for existing instances")
+
# We attempt to apply network config twice on every boot.
# Ensure neither time works.
assert 2 == len(
@@ -62,13 +69,21 @@ def test_boot_event_disabled_by_default(client: IntegrationInstance):
def _test_network_config_applied_on_reboot(client: IntegrationInstance):
log = client.read_from_file('/var/log/cloud-init.log')
+ if 'network config is disabled' in log:
+ pytest.skip("network config disabled. Test doesn't apply")
assert 'Applying network configuration' in log
assert 'dummy0' not in client.execute('ls /sys/class/net')
_add_dummy_bridge_to_netplan(client)
- client.execute('rm /var/log/cloud-init.log')
+ client.execute('echo "" > /var/log/cloud-init.log')
client.restart()
+
log = client.read_from_file('/var/log/cloud-init.log')
+ if 'cache invalid in datasource' in log:
+ # Invalid cache will get cleared, meaning we'll create a new
+ # "instance" and apply networking config, so events aren't
+ # really relevant here
+ pytest.skip("Test only valid for existing instances")
assert 'Event Allowed: scope=network EventType=boot' in log
assert 'Applying network configuration' in log
@@ -76,7 +91,6 @@ def _test_network_config_applied_on_reboot(client: IntegrationInstance):
@pytest.mark.azure
-@pytest.mark.not_xenial
def test_boot_event_enabled_by_default(client: IntegrationInstance):
_test_network_config_applied_on_reboot(client)
@@ -89,7 +103,6 @@ updates:
"""
-@pytest.mark.not_xenial
@pytest.mark.user_data(USER_DATA)
def test_boot_event_enabled(client: IntegrationInstance):
_test_network_config_applied_on_reboot(client)
diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py
index 4e9ab63f..f28079d4 100644
--- a/tests/integration_tests/modules/test_version_change.py
+++ b/tests/integration_tests/modules/test_version_change.py
@@ -1,7 +1,9 @@
from pathlib import Path
+import pytest
+
from tests.integration_tests.instances import IntegrationInstance
-from tests.integration_tests.util import ASSETS_DIR
+from tests.integration_tests.util import ASSETS_DIR, verify_clean_log
PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl')
@@ -10,8 +12,7 @@ TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl'
def _assert_no_pickle_problems(log):
assert 'Failed loading pickled blob' not in log
- assert 'Traceback' not in log
- assert 'WARN' not in log
+ verify_clean_log(log)
def test_reboot_without_version_change(client: IntegrationInstance):
@@ -30,9 +31,23 @@ def test_reboot_without_version_change(client: IntegrationInstance):
client.push_file(TEST_PICKLE, PICKLE_PATH)
client.restart()
log = client.read_from_file('/var/log/cloud-init.log')
- assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log
+
+ # no cache found is an "expected" upgrade error, and
+ # "Failed" means we're unable to load the pickle
+ assert any([
+ 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log,
+ 'no cache found' in log
+ ])
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+# No Azure because the cache gets purged every reboot, so we'll never
+# get to the point where we need to purge cache due to version change
def test_cache_purged_on_version_change(client: IntegrationInstance):
# Start by pushing the invalid pickle so we'll hit an error if the
# cache didn't actually get purged
@@ -48,9 +63,13 @@ def test_log_message_on_missing_version_file(client: IntegrationInstance):
# Start by pushing a pickle so we can see the log message
client.push_file(TEST_PICKLE, PICKLE_PATH)
client.execute("rm /var/lib/cloud/data/python-version")
+ client.execute("rm /var/log/cloud-init.log")
client.restart()
log = client.read_from_file('/var/log/cloud-init.log')
- assert (
- 'Writing python-version file. '
- 'Cache compatibility status is currently unknown.'
- ) in log
+ if 'no cache found' not in log:
+ # We don't expect the python version file to exist if we have no
+ # pre-existing cache
+ assert (
+ 'Writing python-version file. '
+ 'Cache compatibility status is currently unknown.'
+ ) in log
diff --git a/tests/integration_tests/modules/test_write_files.py b/tests/integration_tests/modules/test_write_files.py
index 15832ae3..1d532fac 100644
--- a/tests/integration_tests/modules/test_write_files.py
+++ b/tests/integration_tests/modules/test_write_files.py
@@ -21,6 +21,9 @@ B64_CONTENT = base64.b64encode(ASCII_TEXT.encode("utf-8"))
#
USER_DATA = """\
#cloud-config
+users:
+- default
+- name: myuser
write_files:
- encoding: b64
content: {}
@@ -41,6 +44,12 @@ write_files:
H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
path: /root/file_gzip
permissions: '0755'
+- path: '/home/testuser/my-file'
+ content: |
+ echo 'hello world!'
+ defer: true
+ owner: 'myuser'
+ permissions: '0644'
""".format(B64_CONTENT.decode("ascii"))
@@ -64,3 +73,15 @@ class TestWriteFiles:
def test_write_files(self, cmd, expected_out, class_client):
out = class_client.execute(cmd)
assert expected_out in out
+
+ def test_write_files_deferred(self, class_client):
+ """Test that write files deferred works as expected.
+
+ Users get created after write_files module runs, so ensure that
+ with `defer: true`, the file gets written with correct ownership.
+ """
+ out = class_client.read_from_file("/home/testuser/my-file")
+ assert "echo 'hello world!'" == out
+ assert class_client.execute(
+ 'stat -c "%U %a" /home/testuser/my-file'
+ ) == 'myuser 644'
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
index 376fcc96..0ba4754c 100644
--- a/tests/integration_tests/test_upgrade.py
+++ b/tests/integration_tests/test_upgrade.py
@@ -5,6 +5,7 @@ import pytest
from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
from tests.integration_tests.conftest import get_validated_source
+from tests.integration_tests.util import verify_clean_log
LOG = logging.getLogger('integration_testing.test_upgrade')
@@ -73,17 +74,18 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
pre_cloud_blame = instance.execute('cloud-init analyze blame')
# Ensure no issues pre-upgrade
+ log = instance.read_from_file('/var/log/cloud-init.log')
assert not json.loads(pre_result)['v1']['errors']
- log = instance.read_from_file('/var/log/cloud-init.log')
- assert 'Traceback' not in log
- assert 'WARN' not in log
+ try:
+ verify_clean_log(log)
+ except AssertionError:
+ LOG.warning(
+ 'There were errors/warnings/tracebacks pre-upgrade. '
+            'Any failures may be due to a pre-upgrade problem')
- # Upgrade and reboot
+ # Upgrade
instance.install_new_cloud_init(source, take_snapshot=False)
- instance.execute('hostname something-else')
- instance.restart()
- assert instance.execute('cloud-init status --wait --long').ok
# 'cloud-init init' helps us understand if our pickling upgrade paths
# have broken across re-constitution of a cached datasource. Some
@@ -91,6 +93,11 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
# it here to ensure we get a dirty run.
assert instance.execute('cloud-init init').ok
+ # Reboot
+ instance.execute('hostname something-else')
+ instance.restart()
+ assert instance.execute('cloud-init status --wait --long').ok
+
# get post values
post_hostname = instance.execute('hostname')
post_cloud_id = instance.execute('cloud-id')
@@ -105,13 +112,21 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
assert not json.loads(pre_result)['v1']['errors']
log = instance.read_from_file('/var/log/cloud-init.log')
- assert 'Traceback' not in log
- assert 'WARN' not in log
+ verify_clean_log(log)
# Ensure important things stayed the same
assert pre_hostname == post_hostname
assert pre_cloud_id == post_cloud_id
- assert pre_result == post_result
+ try:
+ assert pre_result == post_result
+ except AssertionError:
+ if instance.settings.PLATFORM == 'azure':
+ pre_json = json.loads(pre_result)
+ post_json = json.loads(post_result)
+ assert pre_json['v1']['datasource'].startswith(
+ 'DataSourceAzure')
+ assert post_json['v1']['datasource'].startswith(
+ 'DataSourceAzure')
assert pre_network == post_network
# Calculate and log all the boot numbers
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index 80430eab..407096cd 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -28,6 +28,40 @@ def verify_ordered_items_in_text(to_verify: list, text: str):
assert index > -1, "Expected item not found: '{}'".format(item)
+def verify_clean_log(log):
+ """Assert no unexpected tracebacks or warnings in logs"""
+ warning_count = log.count('WARN')
+ expected_warnings = 0
+ traceback_count = log.count('Traceback')
+ expected_tracebacks = 0
+
+ warning_texts = [
+ # Consistently on all Azure launches:
+ # azure.py[WARNING]: No lease found; using default endpoint
+ 'No lease found; using default endpoint'
+ ]
+ traceback_texts = []
+ if 'oracle' in log:
+ # LP: #1842752
+ lease_exists_text = 'Stderr: RTNETLINK answers: File exists'
+ warning_texts.append(lease_exists_text)
+ traceback_texts.append(lease_exists_text)
+ # LP: #1833446
+ fetch_error_text = (
+ 'UrlError: 404 Client Error: Not Found for url: '
+ 'http://169.254.169.254/latest/meta-data/')
+ warning_texts.append(fetch_error_text)
+ traceback_texts.append(fetch_error_text)
+
+ for warning_text in warning_texts:
+ expected_warnings += log.count(warning_text)
+ for traceback_text in traceback_texts:
+ expected_tracebacks += log.count(traceback_text)
+
+ assert warning_count == expected_warnings
+ assert traceback_count == expected_tracebacks
+
+
@contextmanager
def emit_dots_on_travis():
"""emit a dot every 60 seconds if running on Travis.
diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py
index 63d2490e..e1c64e2f 100644
--- a/tests/unittests/cmd/devel/test_hotplug_hook.py
+++ b/tests/unittests/cmd/devel/test_hotplug_hook.py
@@ -30,6 +30,11 @@ def mocks():
return_value=FAKE_MAC
)
+ update_event_enabled = mock.patch(
+ 'cloudinit.stages.update_event_enabled',
+ return_value=True,
+ )
+
m_network_state = mock.MagicMock(spec=NetworkState)
parse_net = mock.patch(
'cloudinit.cmd.devel.hotplug_hook.parse_net_config_data',
@@ -45,6 +50,7 @@ def mocks():
sleep = mock.patch('time.sleep')
read_sys_net.start()
+ update_event_enabled.start()
parse_net.start()
select_activator.start()
m_sleep = sleep.start()
@@ -57,6 +63,7 @@ def mocks():
)
read_sys_net.stop()
+ update_event_enabled.stop()
parse_net.stop()
select_activator.stop()
sleep.stop()
@@ -122,13 +129,16 @@ class TestHotplug:
def test_update_event_disabled(self, mocks, caplog):
init = mocks.m_init
- init.update_event_enabled.return_value = False
- handle_hotplug(
- hotplug_init=init,
- devpath='/dev/fake',
- udevaction='remove',
- subsystem='net'
- )
+ with mock.patch(
+ 'cloudinit.stages.update_event_enabled',
+ return_value=False
+ ):
+ handle_hotplug(
+ hotplug_init=init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
assert 'hotplug not enabled for event of type' in caplog.text
init.datasource.update_metadata_if_supported.assert_not_called()
mocks.m_activator.bring_up_interface.assert_not_called()
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index a39e1d0c..1459fd9c 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -224,9 +224,9 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all'])
expected_doc_sections = [
'**Supported distros:** all',
- ('**Supported distros:** almalinux, alpine, centos, debian, '
- 'eurolinux, fedora, opensuse, photon, rhel, rocky, sles, ubuntu, '
- 'virtuozzo'),
+ ('**Supported distros:** almalinux, alpine, centos, cloudlinux, '
+ 'debian, eurolinux, fedora, openEuler, opensuse, photon, rhel, '
+ 'rocky, sles, ubuntu, virtuozzo'),
'**Config schema**:\n **resize_rootfs:** (true/false/noblock)',
'**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n'
]
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 851cf82e..cbc9665d 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -434,16 +434,16 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
@mock.patch(MOCKPATH + 'readurl', autospec=True)
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@mock.patch(MOCKPATH + 'net.is_up')
- def test_get_compute_metadata_uses_compute_url(
+ def test_get_metadata_uses_instance_url(
self, m_net_is_up, m_dhcp, m_readurl):
"""Make sure readurl is called with the correct url when accessing
- network metadata"""
+ metadata"""
m_net_is_up.return_value = True
m_readurl.return_value = url_helper.StringResponse(
json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
dsaz.get_metadata_from_imds(
- 'eth0', retries=3, md_type=dsaz.metadata_type.compute)
+ 'eth0', retries=3, md_type=dsaz.metadata_type.all)
m_readurl.assert_called_with(
"http://169.254.169.254/metadata/instance?api-version="
"2019-06-01", exception_cb=mock.ANY,
@@ -472,10 +472,10 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
@mock.patch(MOCKPATH + 'readurl', autospec=True)
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@mock.patch(MOCKPATH + 'net.is_up')
- def test_get_default_metadata_uses_compute_url(
+ def test_get_default_metadata_uses_instance_url(
self, m_net_is_up, m_dhcp, m_readurl):
"""Make sure readurl is called with the correct url when accessing
- network metadata"""
+ metadata"""
m_net_is_up.return_value = True
m_readurl.return_value = url_helper.StringResponse(
json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
@@ -489,6 +489,26 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
timeout=mock.ANY, infinite=False)
@mock.patch(MOCKPATH + 'readurl', autospec=True)
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ @mock.patch(MOCKPATH + 'net.is_up')
+ def test_get_metadata_uses_extended_url(
+ self, m_net_is_up, m_dhcp, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_net_is_up.return_value = True
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
+
+ dsaz.get_metadata_from_imds(
+ 'eth0', retries=3, md_type=dsaz.metadata_type.all,
+ api_version="2021-08-01")
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version="
+ "2021-08-01&extended=true", exception_cb=mock.ANY,
+ headers=mock.ANY, retries=mock.ANY,
+ timeout=mock.ANY, infinite=False)
+
+ @mock.patch(MOCKPATH + 'readurl', autospec=True)
@mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True)
@mock.patch(MOCKPATH + 'net.is_up', autospec=True)
def test_get_metadata_performs_dhcp_when_network_is_down(
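
Taken together, the readurl assertions in this class pin down the IMDS URL
shape: instance endpoint, an api-version query parameter, and an
'&extended=true' suffix for newer API versions. As a hypothetical helper
(build_imds_url is illustrative, not the datasource's actual API):

    # Rebuild the URLs the assertions above expect, to make the pattern
    # explicit.
    def build_imds_url(api_version, extended=False):
        url = ("http://169.254.169.254/metadata/instance"
               "?api-version={}".format(api_version))
        if extended:
            url += "&extended=true"
        return url

    assert build_imds_url("2021-08-01", extended=True) == (
        "http://169.254.169.254/metadata/instance"
        "?api-version=2021-08-01&extended=true")
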
@@ -912,13 +932,13 @@ scbus-1 on xpt0 bus 0
'PreprovisionedVMType': None,
'PreprovisionedVm': False,
'datasource': {'Azure': {'agent_command': 'my_command'}},
- 'system_info': {'default_user': {'name': u'myuser'}}}
+ 'system_info': {'default_user': {'name': 'myuser'}}}
expected_metadata = {
'azure_data': {
'configurationsettype': 'LinuxProvisioningConfiguration'},
'imds': NETWORK_METADATA,
'instance-id': EXAMPLE_UUID,
- 'local-hostname': u'myhost',
+ 'local-hostname': 'myhost',
'random_seed': 'wild'}
crawled_metadata = dsrc.crawl_metadata()
@@ -950,6 +970,43 @@ scbus-1 on xpt0 bus 0
dsrc.crawl_metadata()
self.assertEqual(str(cm.exception), error_msg)
+ def test_crawl_metadata_call_imds_once_no_reprovision(self):
+        """If not reprovisioning, imds metadata will be fetched once"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "False"}
+ )
+
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
+ dsrc = self._get_ds(data)
+ dsrc.crawl_metadata()
+ self.assertEqual(1, self.m_get_metadata_from_imds.call_count)
+
+ @mock.patch(
+ 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+ @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+ @mock.patch(
+ 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+ @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+ def test_crawl_metadata_call_imds_twice_with_reprovision(
+ self, poll_imds_func, m_report_ready, m_write, m_dhcp
+ ):
+ """If reprovisioning, imds metadata will be fetched twice"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(2, self.m_get_metadata_from_imds.call_count)
+
@mock.patch(
'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
@mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
@@ -1385,7 +1442,7 @@ scbus-1 on xpt0 bus 0
def test_ovf_can_include_unicode(self):
xml = construct_valid_ovf_env(data={})
- xml = u'\ufeff{0}'.format(xml)
+ xml = '\ufeff{0}'.format(xml)
dsrc = self._get_ds({'ovfcontent': xml})
dsrc.get_data()
@@ -2638,6 +2695,22 @@ class TestPreprovisioningShouldReprovision(CiTestCase):
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
+ @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
+ def test__should_reprovision_uses_imds_md(self, write_file, isfile):
+ """The _should_reprovision method should be able to
+ retrieve the preprovisioning VM type from imds metadata"""
+ isfile.return_value = False
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ self.assertTrue(dsa._should_reprovision(
+ (None, None, {}, None),
+ {'extended': {'compute': {'ppsType': 'Running'}}}))
+ self.assertFalse(dsa._should_reprovision(
+ (None, None, {}, None),
+ {}))
+ self.assertFalse(dsa._should_reprovision(
+ (None, None, {}, None),
+ {'extended': {'compute': {"hasCustomData": False}}}))
+
@mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds')
def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
"""_reprovision will poll IMDS."""
@@ -3055,6 +3128,40 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.assertEqual(0, m_dhcp.call_count)
self.assertEqual(0, m_media_switch.call_count)
+ @mock.patch('os.path.isfile')
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
+ self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request,
+ m_media_switch, m_dhcp, m_net):
+ """The poll_imds function should reuse the dhcp ctx if it is already
+        present. This happens when we wait for a nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
+
+ tries = 0
+
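+        # The first request raises a timeout to force the retry path; the
+        # second returns a successful response, so m_request is called twice.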
+ def fake_timeout_once(**kwargs):
+ nonlocal tries
+ tries += 1
+ if tries == 1:
+ raise requests.Timeout('Fake connection timeout')
+ return mock.MagicMock(status_code=200, text="good", content="good")
+
+ m_request.side_effect = fake_timeout_once
+ report_file = self.tmp_path('report_marker', self.tmp)
+ m_isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\
+ mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx:
+ m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
+ dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
+ dsa._poll_imds()
+ self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
+ self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
+ self.assertEqual(0, m_media_switch.call_count)
+ self.assertEqual(2, m_request.call_count)
+
def test_does_not_poll_imds_report_ready_when_marker_file_exists(
self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
"""poll_imds should not call report ready when the reported ready
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 00f0a78c..17d53160 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -18,6 +18,7 @@ from cloudinit.sources import (
DataSourceGCE as GCE,
DataSourceHetzner as Hetzner,
DataSourceIBMCloud as IBMCloud,
+ DataSourceLXD as LXD,
DataSourceMAAS as MAAS,
DataSourceNoCloud as NoCloud,
DataSourceOpenNebula as OpenNebula,
@@ -42,6 +43,7 @@ DEFAULT_LOCAL = [
DigitalOcean.DataSourceDigitalOcean,
Hetzner.DataSourceHetzner,
IBMCloud.DataSourceIBMCloud,
+ LXD.DataSourceLXD,
NoCloud.DataSourceNoCloud,
OpenNebula.DataSourceOpenNebula,
Oracle.DataSourceOracle,
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 2e2b7847..51097231 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -15,7 +15,7 @@ from cloudinit import util
from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir
-PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
+PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
EC2_META = {
'ami-id': 'ami-00000001',
'ami-launch-index': 0,
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 01f4cbd1..80b38f9e 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -106,6 +106,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.m_platform_reports_gce = ppatch.start()
self.m_platform_reports_gce.return_value = True
self.addCleanup(ppatch.stop)
+ self.add_patch('time.sleep', 'm_sleep') # just to speed up tests
super(TestDataSourceGCE, self).setUp()
def test_connection(self):
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index 9c6070a5..283b65c2 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -211,7 +211,8 @@ class TestOpenNebulaDataSource(CiTestCase):
def test_hostname(self, m_get_phys_by_mac):
for dev in ('eth0', 'ens3'):
m_get_phys_by_mac.return_value = {MACADDR: dev}
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC',
+ 'ETH0_IP'):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: PUBLIC_IP})
results = ds.read_context_disk_dir(my_d, mock.Mock())
@@ -488,10 +489,11 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_gateway6('device') correctly returns IPv6 default gateway
address.
"""
- context = {'ETH0_GATEWAY6': IP6_GW}
- net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_gateway6('eth0')
- self.assertEqual(IP6_GW, val)
+ for k in ('GATEWAY6', 'IP6_GATEWAY'):
+ context = {'ETH0_' + k: IP6_GW}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_gateway6('eth0')
+ self.assertEqual(IP6_GW, val)
def test_get_mask(self):
"""
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 478f3503..a9829c75 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -21,7 +21,7 @@ from cloudinit.sources.helpers import openstack
from cloudinit import util
BASE_URL = "http://169.254.169.254"
-PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
+PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
EC2_META = {
'ami-id': 'ami-00000001',
'ami-launch-index': '0',
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index 9f52b504..ad7446f8 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -518,6 +518,61 @@ class TestDatasourceOVF(CiTestCase):
'vmware (%s/seed/ovf-env.xml)' % self.tdir,
ds.subplatform)
+ @mock.patch('cloudinit.subp.subp')
+ @mock.patch('cloudinit.sources.DataSource.persist_instance_data')
+ def test_get_data_vmware_guestinfo_with_network_config(
+ self, m_persist, m_subp
+ ):
+        self._test_get_data_with_network_config(guestinfo=True, iso=False)
+
+ @mock.patch('cloudinit.subp.subp')
+ @mock.patch('cloudinit.sources.DataSource.persist_instance_data')
+ def test_get_data_iso9660_with_network_config(self, m_persist, m_subp):
+        self._test_get_data_with_network_config(guestinfo=False, iso=True)
+
+ def _test_get_data_with_network_config(self, guestinfo, iso):
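+        # Shared body: exactly one transport (guestinfo or iso9660) returns
+        # the OVF env, and its embedded network-config must surface in
+        # ds.network_config.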
+ network_config = dedent("""\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """)
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {"network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001"}
+ env = fill_properties(props)
+ paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+ with mock.patch(MPATH + 'transport_vmware_guestinfo',
+ return_value=env if guestinfo else NOT_FOUND):
+ with mock.patch(MPATH + 'transport_iso9660',
+ return_value=env if iso else NOT_FOUND):
+ self.assertTrue(ds.get_data())
+ self.assertEqual('inst-001', ds.metadata['instance-id'])
+ self.assertEqual(
+ {'version': 2, 'ethernets':
+ {'nics':
+ {'nameservers':
+ {'addresses': ['127.0.0.53'],
+ 'search': ['vmware.com']},
+ 'match': {'name': 'eth*'},
+ 'gateway4': '10.10.10.253',
+ 'dhcp4': False,
+ 'addresses': ['10.10.10.1/24']}}},
+ ds.network_config)
+
def test_get_data_cloudinit_metadata_json(self):
"""Test metadata can be loaded to cloud-init metadata and network.
The metadata format is json.
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index 32f3274a..f9e968c5 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -209,9 +209,9 @@ class TestDataSourceScaleway(HttprettyTestCase):
self.assertEqual(self.datasource.get_instance_id(),
MetadataResponses.FAKE_METADATA['id'])
self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
].sort())
self.assertEqual(self.datasource.get_hostname(),
MetadataResponses.FAKE_METADATA['hostname'])
@@ -242,8 +242,8 @@ class TestDataSourceScaleway(HttprettyTestCase):
]
self.datasource.metadata['ssh_public_keys'] = []
self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
].sort())
def test_ssh_keys_only_conf(self):
@@ -260,9 +260,9 @@ class TestDataSourceScaleway(HttprettyTestCase):
'fingerprint': '2048 06:ff:... login2 (RSA)'
}]
self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
].sort())
def test_ssh_keys_both(self):
@@ -282,9 +282,9 @@ class TestDataSourceScaleway(HttprettyTestCase):
'fingerprint': '2048 06:ff:... login2 (RSA)'
}]
self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
].sort())
@mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
diff --git a/tests/unittests/test_datasource/test_vultr.py b/tests/unittests/test_datasource/test_vultr.py
index bbea2aa3..63235009 100644
--- a/tests/unittests/test_datasource/test_vultr.py
+++ b/tests/unittests/test_datasource/test_vultr.py
@@ -64,10 +64,8 @@ VULTR_V1_1 = {
'raid1-script': '',
'user-data': [
],
- 'vendor-data': {
- 'vendor-script': '',
- 'ethtool-script': '',
- 'config': {
+ 'vendor-data': [
+ {
'package_upgrade': 'true',
'disable_root': 0,
'ssh_pwauth': 1,
@@ -83,7 +81,7 @@ VULTR_V1_1 = {
}
}
}
- }
+ ]
}
VULTR_V1_2 = {
@@ -155,11 +153,8 @@ VULTR_V1_2 = {
'user-data': [
],
- 'vendor-data': {
- 'vendor-script': '',
- 'ethtool-script': '',
- 'raid1-script': '',
- 'config': {
+ 'vendor-data': [
+ {
'package_upgrade': 'true',
'disable_root': 0,
'ssh_pwauth': 1,
@@ -175,7 +170,7 @@ VULTR_V1_2 = {
}
}
}
- }
+ ]
}
SSH_KEYS_1 = [
@@ -217,7 +212,7 @@ EXPECTED_VULTR_NETWORK_1 = {
'accept-ra': 1,
'subnets': [
{'type': 'dhcp', 'control': 'auto'},
- {'type': 'dhcp6', 'control': 'auto'}
+ {'type': 'ipv6_slaac', 'control': 'auto'}
],
}
]
@@ -237,14 +232,13 @@ EXPECTED_VULTR_NETWORK_2 = {
'accept-ra': 1,
'subnets': [
{'type': 'dhcp', 'control': 'auto'},
- {'type': 'dhcp6', 'control': 'auto'}
+ {'type': 'ipv6_slaac', 'control': 'auto'}
],
},
{
'name': 'eth1',
'type': 'physical',
'mac_address': '5a:00:03:1b:4e:ca',
- 'accept-ra': 1,
'subnets': [
{
"type": "static",
@@ -270,12 +264,12 @@ class TestDataSourceVultr(CiTestCase):
super(TestDataSourceVultr, self).setUp()
# Stored as a dict to make it easier to maintain
- raw1 = json.dumps(VULTR_V1_1['vendor-data']['config'])
- raw2 = json.dumps(VULTR_V1_2['vendor-data']['config'])
+ raw1 = json.dumps(VULTR_V1_1['vendor-data'][0])
+ raw2 = json.dumps(VULTR_V1_2['vendor-data'][0])
# Make expected format
- VULTR_V1_1['vendor-data']['config'] = raw1
- VULTR_V1_2['vendor-data']['config'] = raw2
+ VULTR_V1_1['vendor-data'] = [raw1]
+ VULTR_V1_2['vendor-data'] = [raw2]
self.tmp = self.tmp_dir()
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
index 021866b7..685f08ba 100644
--- a/tests/unittests/test_distros/test_create_users.py
+++ b/tests/unittests/test_distros/test_create_users.py
@@ -5,44 +5,7 @@ import re
from cloudinit import distros
from cloudinit import ssh_util
from cloudinit.tests.helpers import (CiTestCase, mock)
-
-
-class MyBaseDistro(distros.Distro):
- # MyBaseDistro is here to test base Distro class implementations
-
- def __init__(self, name="basedistro", cfg=None, paths=None):
- if not cfg:
- cfg = {}
- if not paths:
- paths = {}
- super(MyBaseDistro, self).__init__(name, cfg, paths)
-
- def install_packages(self, pkglist):
- raise NotImplementedError()
-
- def _write_network(self, settings):
- raise NotImplementedError()
-
- def package_command(self, command, args=None, pkgs=None):
- raise NotImplementedError()
-
- def update_package_sources(self):
- raise NotImplementedError()
-
- def apply_locale(self, locale, out_fn=None):
- raise NotImplementedError()
-
- def set_timezone(self, tz):
- raise NotImplementedError()
-
- def _read_hostname(self, filename, default=None):
- raise NotImplementedError()
-
- def _write_hostname(self, hostname, filename):
- raise NotImplementedError()
-
- def _read_system_hostname(self):
- raise NotImplementedError()
+from tests.unittests.util import abstract_to_concrete
@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
@@ -53,7 +16,9 @@ class TestCreateUser(CiTestCase):
def setUp(self):
super(TestCreateUser, self).setUp()
- self.dist = MyBaseDistro()
+ self.dist = abstract_to_concrete(distros.Distro)(
+ name='test', cfg=None, paths=None
+ )
def _useradd2call(self, args):
# return a mock call for the useradd command in args
diff --git a/tests/unittests/test_distros/test_manage_service.py b/tests/unittests/test_distros/test_manage_service.py
new file mode 100644
index 00000000..47e7cfb0
--- /dev/null
+++ b/tests/unittests/test_distros/test_manage_service.py
@@ -0,0 +1,38 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.tests.helpers import (CiTestCase, mock)
+from tests.unittests.util import TestingDistro
+
+
+class TestManageService(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestManageService, self).setUp()
+ self.dist = TestingDistro()
+
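+    # manage_service argument order differs by init system: systemctl takes
+    # '<init_cmd> <action> <service>', sysvinit 'service <service> <action>'.
+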
+ @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd):
+ self.dist.init_cmd = ['systemctl']
+ self.dist.manage_service('start', 'myssh')
+ m_subp.assert_called_with(['systemctl', 'start', 'myssh'],
+ capture=True)
+
+ @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_service_initcmd(self, m_subp, m_sysd):
+ self.dist.init_cmd = ['service']
+ self.dist.manage_service('start', 'myssh')
+ m_subp.assert_called_with(['service', 'myssh', 'start'], capture=True)
+
+ @mock.patch.object(TestingDistro, 'uses_systemd', return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_systemctl(self, m_subp, m_sysd):
+ self.dist.init_cmd = ['ignore']
+ self.dist.manage_service('start', 'myssh')
+ m_subp.assert_called_with(['systemctl', 'start', 'myssh'],
+ capture=True)
+
+# vi: ts=4 sw=4 expandtab
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 8617d7bd..43603ea5 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -534,6 +534,30 @@ class TestDsIdentify(DsIdentifyBase):
return self._check_via_dict(
cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE])
+ def test_ovf_on_vmware_iso_found_open_vm_tools_x86_64_linux_gnu(self):
+ """OVF is identified when open-vm-tools installed in
+ /usr/lib/x86_64-linux-gnu."""
+ cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization'])
+ p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so'
+ x86 = 'usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/' \
+ 'libdeployPkgPlugin.so'
+ cust64['files'][x86] = cust64['files'][p32]
+ del cust64['files'][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE])
+
+ def test_ovf_on_vmware_iso_found_open_vm_tools_aarch64_linux_gnu(self):
+ """OVF is identified when open-vm-tools installed in
+ /usr/lib/aarch64-linux-gnu."""
+ cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization'])
+ p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so'
+ aarch64 = 'usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/' \
+ 'libdeployPkgPlugin.so'
+ cust64['files'][aarch64] = cust64['files'][p32]
+ del cust64['files'][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE])
+
def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self):
"""OVF is identified by well-known iso9660 labels."""
ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF'])
diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py
new file mode 100644
index 00000000..451ffa91
--- /dev/null
+++ b/tests/unittests/test_gpg.py
@@ -0,0 +1,81 @@
+import pytest
+from unittest import mock
+
+from cloudinit import gpg
+from cloudinit import subp
+
+TEST_KEY_HUMAN = '''
+/etc/apt/cloud-init.gpg.d/my_key.gpg
+--------------------------------------------
+pub rsa4096 2021-10-22 [SC]
+ 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85
+uid [ unknown] Brett Holman <brett.holman@canonical.com>
+sub rsa4096 2021-10-22 [A]
+sub rsa4096 2021-10-22 [E]
+'''
+
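+# Machine-readable key listing in the colon-delimited format emitted by
+# `gpg --with-colons` (tru, pub, fpr, uid and sub records).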
+TEST_KEY_MACHINE = '''
+tru::1:1635129362:0:3:1:5
+pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0:
+fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85:
+uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \
+<brett.holman@canonical.com>::::::::::0:
+sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23:
+fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04:
+sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23:
+fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092:
+'''
+
+TEST_KEY_FINGERPRINT_HUMAN = \
+ '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85'
+
+TEST_KEY_FINGERPRINT_MACHINE = \
+ '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85'
+
+
+class TestGPGCommands:
+ def test_dearmor_bad_value(self):
+ """This exception is handled by the callee. Ensure it is not caught
+ internally.
+ """
+ with mock.patch.object(
+ subp,
+ 'subp',
+ side_effect=subp.ProcessExecutionError):
+ with pytest.raises(subp.ProcessExecutionError):
+ gpg.dearmor('garbage key value')
+
+ def test_gpg_list_args(self):
+ """Verify correct command gets called to list keys
+ """
+ no_colons = [
+ 'gpg',
+ '--with-fingerprint',
+ '--no-default-keyring',
+ '--list-keys',
+ '--keyring',
+ 'key']
+ colons = [
+ 'gpg',
+ '--with-fingerprint',
+ '--no-default-keyring',
+ '--list-keys',
+ '--keyring',
+ '--with-colons',
+ 'key']
+ with mock.patch.object(subp, 'subp', return_value=('', '')) as m_subp:
+ gpg.list('key')
+ assert mock.call(colons, capture=True) == m_subp.call_args
+
+ gpg.list('key', human_output=True)
+            test_calls = mock.call(no_colons, capture=True)
+ assert test_calls == m_subp.call_args
+
+ def test_gpg_dearmor_args(self):
+ """Verify correct command gets called to dearmor keys
+ """
+ with mock.patch.object(subp, 'subp', return_value=('', '')) as m_subp:
+ gpg.dearmor('key')
+ test_call = mock.call(
+ ["gpg", "--dearmor"], data='key', decode=False)
+ assert test_call == m_subp.call_args
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
index 369480be..d69916f9 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
@@ -9,19 +9,16 @@ import shutil
import tempfile
from unittest import mock
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
from cloudinit import templater
from cloudinit import subp
from cloudinit import util
from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
from cloudinit.distros.debian import Distro
from cloudinit.tests import helpers as t_help
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -80,16 +77,6 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
get_arch.return_value = 'amd64'
self.addCleanup(apatcher.stop)
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
def apt_source_list(self, distro, mirror, mirrorcheck=None):
"""apt_source_list
Test rendering of a source.list from template for a given distro
@@ -102,7 +89,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
else:
cfg = {'apt_mirror': mirror}
- mycloud = self._get_cloud(distro)
+ mycloud = get_cloud(distro)
with mock.patch.object(util, 'write_file') as mockwf:
with mock.patch.object(util, 'load_file',
@@ -175,7 +162,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v1_srcl_custom(self):
"""Test rendering from a custom source.list template"""
cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
# the second mock restores the original subp
with mock.patch.object(util, 'write_file') as mockwrite:
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
index b96fd4d4..cd6f9239 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
@@ -3,6 +3,7 @@
""" test_apt_custom_sources_list
Test templating of custom sources list
"""
+from contextlib import ExitStack
import logging
import os
import shutil
@@ -10,19 +11,14 @@ import tempfile
from unittest import mock
from unittest.mock import call
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
from cloudinit import subp
from cloudinit import util
-
from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
from cloudinit.distros.debian import Distro
-
from cloudinit.tests import helpers as t_help
+from tests.unittests.util import get_cloud
+
LOG = logging.getLogger(__name__)
TARGET = "/"
@@ -108,37 +104,29 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
get_arch.return_value = 'amd64'
self.addCleanup(apatcher.stop)
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
def _apt_source_list(self, distro, cfg, cfg_on_empty=False):
"""_apt_source_list - Test rendering from template (generic)"""
# entry at top level now, wrap in 'apt' key
cfg = {'apt': cfg}
- mycloud = self._get_cloud(distro)
-
- with mock.patch.object(util, 'write_file') as mock_writefile:
- with mock.patch.object(util, 'load_file',
- return_value=MOCKED_APT_SRC_LIST
- ) as mock_loadfile:
- with mock.patch.object(os.path, 'isfile',
- return_value=True) as mock_isfile:
- cfg_func = ('cloudinit.config.cc_apt_configure.' +
- '_should_configure_on_empty_apt')
- with mock.patch(cfg_func,
- return_value=(cfg_on_empty, "test")
- ) as mock_shouldcfg:
- cc_apt_configure.handle("test", cfg, mycloud, LOG,
- None)
-
- return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
+ mycloud = get_cloud(distro)
+
+ with ExitStack() as stack:
+ mock_writefile = stack.enter_context(mock.patch.object(
+ util, 'write_file'))
+ mock_loadfile = stack.enter_context(mock.patch.object(
+ util, 'load_file', return_value=MOCKED_APT_SRC_LIST))
+ mock_isfile = stack.enter_context(mock.patch.object(
+ os.path, 'isfile', return_value=True))
+ stack.enter_context(mock.patch.object(
+ util, 'del_file'))
+ cfg_func = ('cloudinit.config.cc_apt_configure.'
+ '_should_configure_on_empty_apt')
+ mock_shouldcfg = stack.enter_context(mock.patch(
+ cfg_func, return_value=(cfg_on_empty, 'test')
+ ))
+ cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
+
+ return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
def test_apt_v3_source_list_debian(self):
"""test_apt_v3_source_list_debian - without custom sources or parms"""
@@ -176,7 +164,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""test_apt_v3_source_list_ubuntu_snappy - without custom sources or
parms"""
cfg = {'apt': {}}
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
with mock.patch.object(util, 'write_file') as mock_writefile:
with mock.patch.object(util, 'system_is_snappy',
@@ -219,7 +207,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_srcl_custom(self):
"""test_apt_v3_srcl_custom - Test rendering a custom source template"""
cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
# the second mock restores the original subp
with mock.patch.object(util, 'write_file') as mockwrite:
diff --git a/tests/unittests/test_handler/test_handler_apt_key.py b/tests/unittests/test_handler/test_handler_apt_key.py
new file mode 100644
index 00000000..00e5a38d
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_apt_key.py
@@ -0,0 +1,137 @@
+import os
+from unittest import mock
+
+from cloudinit.config import cc_apt_configure
+from cloudinit import subp
+from cloudinit import util
+
+TEST_KEY_HUMAN = '''
+/etc/apt/cloud-init.gpg.d/my_key.gpg
+--------------------------------------------
+pub rsa4096 2021-10-22 [SC]
+ 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85
+uid [ unknown] Brett Holman <brett.holman@canonical.com>
+sub rsa4096 2021-10-22 [A]
+sub rsa4096 2021-10-22 [E]
+'''
+
+TEST_KEY_MACHINE = '''
+tru::1:1635129362:0:3:1:5
+pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0:
+fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85:
+uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \
+<brett.holman@canonical.com>::::::::::0:
+sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23:
+fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04:
+sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23:
+fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092:
+'''
+
+TEST_KEY_FINGERPRINT_HUMAN = \
+ '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85'
+
+TEST_KEY_FINGERPRINT_MACHINE = \
+ '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85'
+
+
+class TestAptKey:
+ """TestAptKey
+ Class to test apt-key commands
+ """
+ @mock.patch.object(subp, 'subp', return_value=('fakekey', ''))
+ @mock.patch.object(util, 'write_file')
+ def _apt_key_add_success_helper(self, directory, *args, hardened=False):
+ file = cc_apt_configure.apt_key(
+ 'add',
+ output_file='my-key',
+ data='fakekey',
+ hardened=hardened)
+ assert file == directory + '/my-key.gpg'
+
+ def test_apt_key_add_success(self):
+ """Verify the correct directory path gets returned for unhardened case
+ """
+ self._apt_key_add_success_helper('/etc/apt/trusted.gpg.d')
+
+ def test_apt_key_add_success_hardened(self):
+ """Verify the correct directory path gets returned for hardened case
+ """
+ self._apt_key_add_success_helper(
+ '/etc/apt/cloud-init.gpg.d',
+ hardened=True)
+
+ def test_apt_key_add_fail_no_file_name(self):
+ """Verify that null filename gets handled correctly
+ """
+ file = cc_apt_configure.apt_key(
+ 'add',
+ output_file=None,
+ data='')
+ assert '/dev/null' == file
+
+ def _apt_key_fail_helper(self):
+ file = cc_apt_configure.apt_key(
+ 'add',
+ output_file='my-key',
+ data='fakekey')
+ assert file == '/dev/null'
+
+ @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError)
+ def test_apt_key_add_fail_no_file_name_subproc(self, *args):
+ """Verify that bad key value gets handled correctly
+ """
+ self._apt_key_fail_helper()
+
+ @mock.patch.object(
+ subp, 'subp', side_effect=UnicodeDecodeError('test', b'', 1, 1, ''))
+ def test_apt_key_add_fail_no_file_name_unicode(self, *args):
+ """Verify that bad key encoding gets handled correctly
+ """
+ self._apt_key_fail_helper()
+
+ def _apt_key_list_success_helper(self, finger, key, human_output=True):
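+        # Patches are applied to the nested function so each helper
+        # invocation gets fresh os.listdir and subp.subp mocks.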
+ @mock.patch.object(os, 'listdir', return_value=('/fake/dir/key.gpg',))
+ @mock.patch.object(subp, 'subp', return_value=(key, ''))
+ def mocked_list(*a):
+ keys = cc_apt_configure.apt_key('list', human_output)
+ assert finger in keys
+ mocked_list()
+
+ def test_apt_key_list_success_human(self):
+ """Verify expected key output, human
+ """
+ self._apt_key_list_success_helper(
+ TEST_KEY_FINGERPRINT_HUMAN,
+ TEST_KEY_HUMAN)
+
+ def test_apt_key_list_success_machine(self):
+ """Verify expected key output, machine
+ """
+ self._apt_key_list_success_helper(
+ TEST_KEY_FINGERPRINT_MACHINE,
+ TEST_KEY_MACHINE, human_output=False)
+
+ @mock.patch.object(os, 'listdir', return_value=())
+ @mock.patch.object(subp, 'subp', return_value=('', ''))
+ def test_apt_key_list_fail_no_keys(self, *args):
+ """Ensure falsy output for no keys
+ """
+ keys = cc_apt_configure.apt_key('list')
+ assert not keys
+
+    @mock.patch.object(os, 'listdir', return_value=('file_not_gpg_key.txt',))
+ @mock.patch.object(subp, 'subp', return_value=('', ''))
+ def test_apt_key_list_fail_no_keys_file(self, *args):
+ """Ensure non-gpg file is not returned.
+
+ apt-key used file extensions for this, so we do too
+ """
+ assert not cc_apt_configure.apt_key('list')
+
+ @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError)
+    @mock.patch.object(os, 'listdir', return_value=('bad_gpg_key.gpg',))
+ def test_apt_key_list_fail_bad_key_file(self, *args):
+ """Ensure bad gpg key doesn't throw exeption.
+ """
+ assert not cc_apt_configure.apt_key('list')
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py
index 367971cb..2357d699 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py
@@ -9,6 +9,7 @@ import os
import re
import shutil
import tempfile
+import pathlib
from unittest import mock
from unittest.mock import call
@@ -279,16 +280,16 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf(cfg)
- with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1234', '')) as mockobj:
+ with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
- # check if it added the right ammount of keys
+ # check if it added the right number of keys
calls = []
- for _ in range(keynum):
- calls.append(call(['apt-key', 'add', '-'],
- data=b'fakekey 1234',
- target=None))
+ sources = cfg['apt']['sources']
+ for src in sources:
+ calls.append(call(sources[src], None))
+
mockobj.assert_has_calls(calls, any_order=True)
self.assertTrue(os.path.isfile(filename))
@@ -364,11 +365,17 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf([cfg])
- with mock.patch.object(subp, 'subp') as mockobj:
+ with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
- mockobj.assert_called_with(['apt-key', 'add', '-'],
- data=b'fakekey 4321', target=None)
+        # check if it added the right number of keys
+ sources = cfg['apt']['sources']
+ calls = []
+ for src in sources:
+ calls.append(call(sources[src], None))
+
+ mockobj.assert_has_calls(calls, any_order=True)
self.assertTrue(os.path.isfile(filename))
@@ -405,12 +412,15 @@ class TestAptSourceConfig(TestCase):
cfg = {'key': "fakekey 4242",
'filename': self.aptlistfile}
cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp') as mockobj:
+ with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
- mockobj.assert_called_once_with(['apt-key', 'add', '-'],
- data=b'fakekey 4242', target=None)
+ calls = (call(
+ 'add',
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data='fakekey 4242',
+ hardened=False),)
+ mockobj.assert_has_calls(calls, any_order=True)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
@@ -422,16 +432,26 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf([cfg])
with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1212', '')) as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- mockobj.assert_called_with(['apt-key', 'add', '-'],
- data=b'fakekey 1212', target=None)
+ return_value=('fakekey 1212', '')):
+ with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
+ cc_apt_configure.handle(
+ "test",
+ cfg,
+ self.fakecloud,
+ None,
+ None)
+
+ calls = (call(
+ 'add',
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data='fakekey 1212',
+ hardened=False),)
+ mockobj.assert_has_calls(calls, any_order=True)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
- def apt_src_keyid_real(self, cfg, expectedkey):
+ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None):
"""apt_src_keyid_real
Test specification of a keyid without source including
up to addition of the key (add_apt_key_raw mocked to keep the
@@ -446,9 +466,14 @@ class TestAptSourceConfig(TestCase):
return_value=expectedkey) as mockgetkey:
cc_apt_configure.handle("test", cfg, self.fakecloud,
None, None)
-
+ if is_hardened is not None:
+ mockkey.assert_called_with(
+ expectedkey,
+ self.aptlistfile,
+ hardened=is_hardened)
+ else:
+ mockkey.assert_called_with(expectedkey, self.aptlistfile)
mockgetkey.assert_called_with(key, keyserver)
- mockkey.assert_called_with(expectedkey, None)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
@@ -459,7 +484,7 @@ class TestAptSourceConfig(TestCase):
cfg = {'keyid': keyid,
'filename': self.aptlistfile}
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
def test_apt_src_longkeyid_real(self):
"""test_apt_src_longkeyid_real - Test long keyid including key add"""
@@ -467,7 +492,7 @@ class TestAptSourceConfig(TestCase):
cfg = {'keyid': keyid,
'filename': self.aptlistfile}
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
def test_apt_src_longkeyid_ks_real(self):
"""test_apt_src_longkeyid_ks_real - Test long keyid from other ks"""
@@ -476,7 +501,7 @@ class TestAptSourceConfig(TestCase):
'keyserver': 'keys.gnupg.net',
'filename': self.aptlistfile}
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
def test_apt_src_ppa(self):
"""Test adding a ppa"""
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
index abb0a9b6..20289121 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -10,23 +10,20 @@ import re
import shutil
import socket
import tempfile
+import pathlib
from unittest import TestCase, mock
from unittest.mock import call
-from cloudinit import cloud
-from cloudinit import distros
from cloudinit import gpg
-from cloudinit import helpers
from cloudinit import subp
from cloudinit import util
-
from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
from cloudinit.tests import helpers as t_help
-EXPECTEDKEY = u"""-----BEGIN PGP PUBLIC KEY BLOCK-----
+from tests.unittests.util import get_cloud
+
+EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
@@ -106,16 +103,6 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
else:
return self.join(*args, **kwargs)
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
def _apt_src_basic(self, filename, cfg):
"""_apt_src_basic
Test Fix deb source string, has to overwrite mirror conf in params
@@ -228,22 +215,24 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
self._apt_src_replace_tri(cfg)
- def _apt_src_keyid(self, filename, cfg, keynum):
+ def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None):
"""_apt_src_keyid
Test specification of a source + keyid
"""
params = self._get_default_params()
- with mock.patch("cloudinit.subp.subp",
- return_value=('fakekey 1234', '')) as mockobj:
+ with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
- # check if it added the right ammount of keys
+ # check if it added the right number of keys
calls = []
- for _ in range(keynum):
- calls.append(call(['apt-key', 'add', '-'], data=b'fakekey 1234',
- target=TARGET))
+ for key in cfg:
+ if is_hardened is not None:
+ calls.append(call(cfg[key], hardened=is_hardened))
+ else:
+ calls.append(call(cfg[key], TARGET))
+
mockobj.assert_has_calls(calls, any_order=True)
self.assertTrue(os.path.isfile(filename))
@@ -262,6 +251,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
'http://ppa.launchpad.net/'
'smoser/cloud-init-test/ubuntu'
' xenial main'),
+ 'filename': self.aptlistfile,
'keyid': "03683F77"}}
self._apt_src_keyid(self.aptlistfile, cfg, 1)
@@ -282,6 +272,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
'http://ppa.launchpad.net/'
'smoser/cloud-init-test/ubuntu'
' xenial multiverse'),
+ 'filename': self.aptlistfile3,
'keyid': "03683F77"}}
self._apt_src_keyid(self.aptlistfile, cfg, 3)
@@ -307,15 +298,19 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
'http://ppa.launchpad.net/'
'smoser/cloud-init-test/ubuntu'
' xenial main'),
+ 'filename': self.aptlistfile,
'key': "fakekey 4321"}}
- with mock.patch.object(subp, 'subp') as mockobj:
+ with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4321',
- target=TARGET)
-
+ calls = (call(
+ 'add',
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data='fakekey 4321',
+ hardened=False),)
+ mockobj.assert_has_calls(calls, any_order=True)
self.assertTrue(os.path.isfile(self.aptlistfile))
contents = util.load_file(self.aptlistfile)
@@ -331,12 +326,16 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
params = self._get_default_params()
cfg = {self.aptlistfile: {'key': "fakekey 4242"}}
- with mock.patch.object(subp, 'subp') as mockobj:
+ with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
self._add_apt_sources(cfg, TARGET, template_params=params,
aa_repo_match=self.matcher)
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4242',
- target=TARGET)
+ calls = (call(
+ 'add',
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data='fakekey 4242',
+ hardened=False),)
+ mockobj.assert_has_calls(calls, any_order=True)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
@@ -345,19 +344,23 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"""test_apt_v3_src_keyidonly - Test keyid without source"""
params = self._get_default_params()
cfg = {self.aptlistfile: {'keyid': "03683F77"}}
-
with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1212', '')) as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
+ return_value=('fakekey 1212', '')):
+ with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
+ self._add_apt_sources(cfg, TARGET, template_params=params,
+ aa_repo_match=self.matcher)
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 1212',
- target=TARGET)
+ calls = (call(
+ 'add',
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data='fakekey 1212',
+ hardened=False),)
+ mockobj.assert_has_calls(calls, any_order=True)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
- def apt_src_keyid_real(self, cfg, expectedkey):
+ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None):
"""apt_src_keyid_real
Test specification of a keyid without source including
up to addition of the key (add_apt_key_raw mocked to keep the
@@ -375,7 +378,11 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
mockgetkey.assert_called_with(keycfg['keyid'],
keycfg.get('keyserver',
'keyserver.ubuntu.com'))
- mockkey.assert_called_with(expectedkey, TARGET)
+ if is_hardened is not None:
+ mockkey.assert_called_with(
+ expectedkey,
+ keycfg['keyfile'],
+ hardened=is_hardened)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
@@ -383,21 +390,24 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
def test_apt_v3_src_keyid_real(self):
"""test_apt_v3_src_keyid_real - Test keyid including key add"""
keyid = "03683F77"
- cfg = {self.aptlistfile: {'keyid': keyid}}
+ cfg = {self.aptlistfile: {'keyid': keyid,
+ 'keyfile': self.aptlistfile}}
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
def test_apt_v3_src_longkeyid_real(self):
"""test_apt_v3_src_longkeyid_real Test long keyid including key add"""
keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {self.aptlistfile: {'keyid': keyid}}
+ cfg = {self.aptlistfile: {'keyid': keyid,
+ 'keyfile': self.aptlistfile}}
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
def test_apt_v3_src_longkeyid_ks_real(self):
"""test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks"""
keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
cfg = {self.aptlistfile: {'keyid': keyid,
+ 'keyfile': self.aptlistfile,
'keyserver': 'keys.gnupg.net'}}
self.apt_src_keyid_real(cfg, EXPECTEDKEY)
@@ -407,6 +417,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
keyid = "03683F77"
params = self._get_default_params()
cfg = {self.aptlistfile: {'keyid': keyid,
+ 'keyfile': self.aptlistfile,
'keyserver': 'test.random.com'}}
# in some test environments only *.ubuntu.com is reachable
@@ -419,7 +430,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
aa_repo_match=self.matcher)
mockgetkey.assert_called_with('03683F77', 'test.random.com')
- mockadd.assert_called_with('fakekey', TARGET)
+ mockadd.assert_called_with('fakekey', self.aptlistfile, hardened=False)
# filename should be ignored on key only
self.assertFalse(os.path.isfile(self.aptlistfile))
@@ -587,7 +598,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
default_mirrors = cc_apt_configure.get_default_mirrors(arch)
pmir = default_mirrors["PRIMARY"]
smir = default_mirrors["SECURITY"]
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch)
self.assertEqual(mirrors['MIRROR'],
@@ -659,7 +670,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
default_mirrors = cc_apt_configure.get_default_mirrors(arch)
pmir = default_mirrors["PRIMARY"]
smir = default_mirrors["SECURITY"]
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"],
"uri": "notthis"},
{'arches': ["thisarchdoesntexist"],
@@ -969,7 +980,7 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
pmir = "phit"
smir = "shit"
arch = 'amd64'
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud('ubuntu')
cfg = {"primary": [{'arches': ["default"],
"search_dns": True}],
"security": [{'arches': ["default"],
@@ -1016,10 +1027,12 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
'primary': [
{'arches': [arch],
'uri': 'http://test.ubuntu.com/',
+ 'filename': 'primary',
'key': 'fakekey_primary'}],
'security': [
{'arches': [arch],
'uri': 'http://testsec.ubuntu.com/',
+ 'filename': 'security',
'key': 'fakekey_security'}]
}
@@ -1027,8 +1040,8 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
'add_apt_key_raw') as mockadd:
cc_apt_configure.add_mirror_keys(cfg, TARGET)
calls = [
- mock.call('fakekey_primary', TARGET),
- mock.call('fakekey_security', TARGET),
+ mock.call('fakekey_primary', 'primary', hardened=False),
+ mock.call('fakekey_security', 'security', hardened=False),
]
mockadd.assert_has_calls(calls, any_order=True)
diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py
index b53d60d4..8cd3a5e1 100644
--- a/tests/unittests/test_handler/test_handler_bootcmd.py
+++ b/tests/unittests/test_handler/test_handler_bootcmd.py
@@ -1,14 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import tempfile
from cloudinit.config.cc_bootcmd import handle, schema
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, subp, util)
+from cloudinit import (subp, util)
from cloudinit.tests.helpers import (
CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
-import logging
-import tempfile
-
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -39,18 +38,10 @@ class TestBootcmd(CiTestCase):
self.subp = subp.subp
self.new_root = self.tmp_dir()
- def _get_cloud(self, distro):
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- paths.datasource = myds
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
def test_handler_skip_if_no_bootcmd(self):
"""When the provided config doesn't contain bootcmd, skip it."""
cfg = {}
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
handle('notimportant', cfg, mycloud, LOG, None)
self.assertIn(
"Skipping module named notimportant, no 'bootcmd' key",
@@ -59,7 +50,7 @@ class TestBootcmd(CiTestCase):
def test_handler_invalid_command_set(self):
"""Commands which can't be converted to shell will raise errors."""
invalid_config = {'bootcmd': 1}
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
with self.assertRaises(TypeError) as context_manager:
handle('cc_bootcmd', invalid_config, cc, LOG, [])
self.assertIn('Failed to shellify bootcmd', self.logs.getvalue())
@@ -75,7 +66,7 @@ class TestBootcmd(CiTestCase):
invalid content.
"""
invalid_config = {'bootcmd': 1}
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
with self.assertRaises(TypeError):
handle('cc_bootcmd', invalid_config, cc, LOG, [])
self.assertIn(
@@ -92,7 +83,7 @@ class TestBootcmd(CiTestCase):
"""
invalid_config = {
'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
with self.assertRaises(TypeError) as context_manager:
handle('cc_bootcmd', invalid_config, cc, LOG, [])
expected_warnings = [
@@ -111,7 +102,7 @@ class TestBootcmd(CiTestCase):
def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self):
"""Valid schema runs a bootcmd script with INSTANCE_ID in the env."""
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
out_file = self.tmp_path('bootcmd.out', self.new_root)
my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425"
valid_config = {'bootcmd': [
@@ -125,7 +116,7 @@ class TestBootcmd(CiTestCase):
def test_handler_runs_bootcmd_script_with_error(self):
"""When a valid script generates an error, that error is raised."""
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
valid_config = {'bootcmd': ['exit 1']} # Script with error
with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 6e3831ed..2a4ab49e 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -1,20 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import shutil
+import tempfile
+import unittest
+from contextlib import ExitStack
+from unittest import mock
-from cloudinit import cloud
from cloudinit import distros
from cloudinit.config import cc_ca_certs
from cloudinit import helpers
from cloudinit import subp
from cloudinit import util
-
from cloudinit.tests.helpers import TestCase
-import logging
-import shutil
-import tempfile
-import unittest
-from contextlib import ExitStack
-from unittest import mock
+from tests.unittests.util import get_cloud
class TestNoConfig(unittest.TestCase):
@@ -56,10 +55,6 @@ class TestConfig(TestCase):
paths = helpers.Paths({})
return cls(kind, {}, paths)
- def _get_cloud(self, kind):
- distro = self._fetch_distro(kind)
- return cloud.Cloud(None, self.paths, None, distro, None)
-
def _mock_init(self):
self.mocks = ExitStack()
self.addCleanup(self.mocks.close)
@@ -81,7 +76,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
- cloud = self._get_cloud(distro_name)
+ cloud = get_cloud(distro_name)
cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
self.assertEqual(self.mock_add.call_count, 0)
@@ -94,7 +89,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
- cloud = self._get_cloud(distro_name)
+ cloud = get_cloud(distro_name)
cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
self.assertEqual(self.mock_add.call_count, 0)
@@ -107,7 +102,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
- cloud = self._get_cloud(distro_name)
+ cloud = get_cloud(distro_name)
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
@@ -121,7 +116,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
- cloud = self._get_cloud(distro_name)
+ cloud = get_cloud(distro_name)
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
@@ -135,7 +130,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
- cloud = self._get_cloud(distro_name)
+ cloud = get_cloud(distro_name)
cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
self.assertEqual(self.mock_add.call_count, 0)
@@ -148,7 +143,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
- cloud = self._get_cloud(distro_name)
+ cloud = get_cloud(distro_name)
cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
self.assertEqual(self.mock_add.call_count, 0)
@@ -161,7 +156,7 @@ class TestConfig(TestCase):
for distro_name in cc_ca_certs.distros:
self._mock_init()
- cloud = self._get_cloud(distro_name)
+ cloud = get_cloud(distro_name)
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
index 7918c609..0672cebc 100644
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ b/tests/unittests/test_handler/test_handler_chef.py
@@ -5,16 +5,14 @@ import json
import logging
import os
-from cloudinit import cloud
from cloudinit.config import cc_chef
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit.sources import DataSourceNone
from cloudinit import util
from cloudinit.tests.helpers import (
HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf)
+from tests.unittests.util import get_cloud
+
LOG = logging.getLogger(__name__)
CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"])
@@ -106,19 +104,12 @@ class TestChef(FilesystemMockingTestCase):
super(TestChef, self).setUp()
self.tmp = self.tmp_dir()
- def fetch_cloud(self, distro_kind):
- cls = distros.fetch(distro_kind)
- paths = helpers.Paths({})
- distro = cls(distro_kind, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, distro, paths, None)
- return cloud.Cloud(ds, paths, {}, distro, None)
-
def test_no_config(self):
self.patchUtils(self.tmp)
self.patchOS(self.tmp)
cfg = {}
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
+ cc_chef.handle('chef', cfg, get_cloud(), LOG, [])
for d in cc_chef.CHEF_DIRS:
self.assertFalse(os.path.isdir(d))
@@ -163,7 +154,7 @@ class TestChef(FilesystemMockingTestCase):
'/etc/chef/encrypted_data_bag_secret'
},
}
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
+ cc_chef.handle('chef', cfg, get_cloud(), LOG, [])
for d in cc_chef.CHEF_DIRS:
self.assertTrue(os.path.isdir(d))
c = util.load_file(cc_chef.CHEF_RB_PATH)
@@ -198,7 +189,7 @@ class TestChef(FilesystemMockingTestCase):
}
},
}
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
+ cc_chef.handle('chef', cfg, get_cloud(), LOG, [])
c = util.load_file(cc_chef.CHEF_FB_PATH)
self.assertEqual(
{
@@ -222,7 +213,7 @@ class TestChef(FilesystemMockingTestCase):
'show_time': None,
},
}
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
+ cc_chef.handle('chef', cfg, get_cloud(), LOG, [])
c = util.load_file(cc_chef.CHEF_RB_PATH)
self.assertNotIn('json_attribs', c)
self.assertNotIn('Formatter.show_time', c)
@@ -246,7 +237,7 @@ class TestChef(FilesystemMockingTestCase):
'validation_cert': v_cert
},
}
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
+ cc_chef.handle('chef', cfg, get_cloud(), LOG, [])
content = util.load_file(cc_chef.CHEF_RB_PATH)
self.assertIn(v_path, content)
util.load_file(v_path)
@@ -271,7 +262,7 @@ class TestChef(FilesystemMockingTestCase):
}
util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
util.write_file(v_path, expected_cert)
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
+ cc_chef.handle('chef', cfg, get_cloud(), LOG, [])
content = util.load_file(cc_chef.CHEF_RB_PATH)
self.assertIn(v_path, content)
util.load_file(v_path)
diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/test_handler/test_handler_debug.py
index 787ba350..41e9d9bd 100644
--- a/tests/unittests/test_handler/test_handler_debug.py
+++ b/tests/unittests/test_handler/test_handler_debug.py
@@ -1,21 +1,15 @@
# Copyright (C) 2014 Yahoo! Inc.
#
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import shutil
+import tempfile
-from cloudinit.config import cc_debug
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
from cloudinit import util
-
-from cloudinit.sources import DataSourceNone
-
+from cloudinit.config import cc_debug
from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-import logging
-import shutil
-import tempfile
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -26,29 +20,20 @@ class TestDebug(FilesystemMockingTestCase):
super(TestDebug, self).setUp()
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro, metadata=None):
self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, d, paths)
- if metadata:
- ds.metadata.update(metadata)
- return cloud.Cloud(ds, paths, {}, d, None)
def test_debug_write(self, m_locale):
m_locale.return_value = 'en_US.UTF-8'
cfg = {
'abc': '123',
- 'c': u'\u20a0',
+ 'c': '\u20a0',
'debug': {
'verbose': True,
# Does not actually write here due to mocking...
'output': '/var/log/cloud-init-debug.log',
},
}
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
cc_debug.handle('cc_debug', cfg, cc, LOG, [])
contents = util.load_file('/var/log/cloud-init-debug.log')
# Some basic sanity tests...
@@ -66,7 +51,7 @@ class TestDebug(FilesystemMockingTestCase):
'output': '/var/log/cloud-init-debug.log',
},
}
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
cc_debug.handle('cc_debug', cfg, cc, LOG, [])
self.assertRaises(IOError,
util.load_file, '/var/log/cloud-init-debug.log')
diff --git a/tests/unittests/test_handler/test_handler_install_hotplug.py b/tests/unittests/test_handler/test_handler_install_hotplug.py
new file mode 100644
index 00000000..5d6b1e77
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_install_hotplug.py
@@ -0,0 +1,113 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from collections import namedtuple
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_install_hotplug import (
+ handle,
+ HOTPLUG_UDEV_PATH,
+ HOTPLUG_UDEV_RULES_TEMPLATE,
+)
+from cloudinit.event import EventScope, EventType
+
+
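+# The fixture below starts every patcher up front and hands the started mocks
+# to each test as a namedtuple; the .stop() calls after the yield run at
+# fixture teardown.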
+@pytest.yield_fixture()
+def mocks():
+ m_update_enabled = mock.patch('cloudinit.stages.update_event_enabled')
+ m_write = mock.patch('cloudinit.util.write_file', autospec=True)
+ m_del = mock.patch('cloudinit.util.del_file', autospec=True)
+ m_subp = mock.patch('cloudinit.subp.subp')
+ m_which = mock.patch('cloudinit.subp.which', return_value=None)
+ m_path_exists = mock.patch('os.path.exists', return_value=False)
+
+ yield namedtuple(
+ 'Mocks',
+ 'm_update_enabled m_write m_del m_subp m_which m_path_exists'
+ )(
+ m_update_enabled.start(), m_write.start(), m_del.start(),
+ m_subp.start(), m_which.start(), m_path_exists.start()
+ )
+
+ m_update_enabled.stop()
+ m_write.stop()
+ m_del.stop()
+ m_subp.stop()
+ m_which.stop()
+ m_path_exists.stop()
+
+
+class TestInstallHotplug:
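+    """Exercise cc_install_hotplug.handle: udev rules are written only when
+    the datasource supports network HOTPLUG events and updates are enabled,
+    and are removed when present but no longer enabled."""
+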
+ @pytest.mark.parametrize('libexec_exists', [True, False])
+ def test_rules_installed_when_supported_and_enabled(
+ self, mocks, libexec_exists
+ ):
+ mocks.m_which.return_value = 'udevadm'
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ if libexec_exists:
+ libexecdir = "/usr/libexec/cloud-init"
+ else:
+ libexecdir = "/usr/lib/cloud-init"
+ with mock.patch('os.path.exists', return_value=libexec_exists):
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ mocks.m_write.assert_called_once_with(
+ filename=HOTPLUG_UDEV_PATH,
+ content=HOTPLUG_UDEV_RULES_TEMPLATE.format(
+ libexecdir=libexecdir),
+ )
+ assert mocks.m_subp.call_args_list == [mock.call([
+ 'udevadm', 'control', '--reload-rules',
+ ])]
+ assert mocks.m_del.call_args_list == []
+
+ def test_rules_not_installed_when_unsupported(self, mocks):
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {}
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
+
+ def test_rules_not_installed_when_disabled(self, mocks):
+ mocks.m_update_enabled.return_value = False
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
+
+ def test_rules_uninstalled_when_disabled(self, mocks):
+ mocks.m_path_exists.return_value = True
+ mocks.m_update_enabled.return_value = False
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {}
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH)
+ assert mocks.m_subp.call_args_list == [mock.call([
+ 'udevadm', 'control', '--reload-rules',
+ ])]
+ assert mocks.m_write.call_args_list == []
+
+ def test_rules_not_installed_when_no_udevadm(self, mocks):
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/test_handler/test_handler_landscape.py
index 7d165687..00333985 100644
--- a/tests/unittests/test_handler/test_handler_landscape.py
+++ b/tests/unittests/test_handler/test_handler_landscape.py
@@ -1,14 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+from configobj import ConfigObj
from cloudinit.config import cc_landscape
-from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.sources import DataSourceNone
+from cloudinit import util
from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock,
wrap_and_call)
-from configobj import ConfigObj
-import logging
-
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -22,18 +21,11 @@ class TestLandscape(FilesystemMockingTestCase):
self.new_root = self.tmp_dir()
self.conf = self.tmp_path('client.conf', self.new_root)
self.default_file = self.tmp_path('default_landscape', self.new_root)
-
- def _get_cloud(self, distro):
self.patchUtils(self.new_root)
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- return cloud.Cloud(myds, paths, {}, mydist, None)
def test_handler_skips_empty_landscape_cloudconfig(self):
"""Empty landscape cloud-config section does no work."""
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud('ubuntu')
mycloud.distro = mock.MagicMock()
cfg = {'landscape': {}}
cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
@@ -41,7 +33,7 @@ class TestLandscape(FilesystemMockingTestCase):
def test_handler_error_on_invalid_landscape_type(self):
"""Raise an error when landscape configuraiton option is invalid."""
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud('ubuntu')
cfg = {'landscape': 'wrongtype'}
with self.assertRaises(RuntimeError) as context_manager:
cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
@@ -52,7 +44,7 @@ class TestLandscape(FilesystemMockingTestCase):
@mock.patch('cloudinit.config.cc_landscape.subp')
def test_handler_restarts_landscape_client(self, m_subp):
"""handler restarts lansdscape-client after install."""
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud('ubuntu')
cfg = {'landscape': {'client': {}}}
wrap_and_call(
'cloudinit.config.cc_landscape',
@@ -64,7 +56,7 @@ class TestLandscape(FilesystemMockingTestCase):
def test_handler_installs_client_and_creates_config_file(self):
"""Write landscape client.conf and install landscape-client."""
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud('ubuntu')
cfg = {'landscape': {'client': {}}}
expected = {'client': {
'log_level': 'info',
@@ -91,7 +83,7 @@ class TestLandscape(FilesystemMockingTestCase):
"""Merge and write options from LSC_CLIENT_CFG_FILE with defaults."""
# Write existing sparse client.conf file
util.write_file(self.conf, '[client]\ncomputer_title = My PC\n')
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud('ubuntu')
cfg = {'landscape': {'client': {}}}
expected = {'client': {
'log_level': 'info',
@@ -112,7 +104,7 @@ class TestLandscape(FilesystemMockingTestCase):
"""Merge and write options from cloud-config options with defaults."""
# Write empty sparse client.conf file
util.write_file(self.conf, '')
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud('ubuntu')
cfg = {'landscape': {'client': {'computer_title': 'My PC'}}}
expected = {'client': {
'log_level': 'info',
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
index 15fe7b23..3c17927e 100644
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ b/tests/unittests/test_handler/test_handler_locale.py
@@ -3,27 +3,21 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_locale
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNoCloud
-
-from cloudinit.tests import helpers as t_help
-
-from configobj import ConfigObj
-
import logging
import os
import shutil
import tempfile
from io import BytesIO
+from configobj import ConfigObj
from unittest import mock
+from cloudinit import util
+from cloudinit.config import cc_locale
+from cloudinit.tests import helpers as t_help
+
+from tests.unittests.util import get_cloud
+
+
LOG = logging.getLogger(__name__)
@@ -33,16 +27,7 @@ class TestLocale(t_help.FilesystemMockingTestCase):
super(TestLocale, self).setUp()
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro):
self.patchUtils(self.new_root)
- paths = helpers.Paths({})
-
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
def test_set_locale_arch(self):
locale = 'en_GB.UTF-8'
@@ -51,7 +36,7 @@ class TestLocale(t_help.FilesystemMockingTestCase):
'locale': locale,
'locale_configfile': locale_configfile,
}
- cc = self._get_cloud('arch')
+ cc = get_cloud('arch')
with mock.patch('cloudinit.distros.arch.subp.subp') as m_subp:
with mock.patch('cloudinit.distros.arch.LOG.warning') as m_LOG:
@@ -72,7 +57,7 @@ class TestLocale(t_help.FilesystemMockingTestCase):
cfg = {
'locale': 'My.Locale',
}
- cc = self._get_cloud('sles')
+ cc = get_cloud('sles')
cc_locale.handle('cc_locale', cfg, cc, LOG, [])
if cc.distro.uses_systemd():
locale_conf = cc.distro.systemd_locale_conf_fn
@@ -87,7 +72,7 @@ class TestLocale(t_help.FilesystemMockingTestCase):
def test_set_locale_sles_default(self):
cfg = {}
- cc = self._get_cloud('sles')
+ cc = get_cloud('sles')
cc_locale.handle('cc_locale', cfg, cc, LOG, [])
if cc.distro.uses_systemd():
@@ -106,7 +91,7 @@ class TestLocale(t_help.FilesystemMockingTestCase):
locale_conf = os.path.join(self.new_root, "etc/default/locale")
util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
cfg = {'locale': 'C.UTF-8'}
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud('ubuntu')
with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp:
with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN',
locale_conf):
@@ -118,7 +103,7 @@ class TestLocale(t_help.FilesystemMockingTestCase):
def test_locale_rhel_defaults_en_us_utf8(self):
"""Test cc_locale gets en_US.UTF-8 from distro get_locale fallback"""
cfg = {}
- cc = self._get_cloud('rhel')
+ cc = get_cloud('rhel')
update_sysconfig = 'cloudinit.distros.rhel_util.update_sysconfig_file'
with mock.patch.object(cc.distro, 'uses_systemd') as m_use_sd:
m_use_sd.return_value = True
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index b2181992..ea8b6e90 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -1,11 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
from cloudinit.config import cc_lxd
-from cloudinit.sources import DataSourceNoCloud
-from cloudinit import (distros, helpers, cloud)
from cloudinit.tests import helpers as t_help
-from unittest import mock
+from tests.unittests.util import get_cloud
class TestLxd(t_help.CiTestCase):
@@ -22,18 +21,10 @@ class TestLxd(t_help.CiTestCase):
}
}
- def _get_cloud(self, distro):
- cls = distros.fetch(distro)
- paths = helpers.Paths({})
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.subp")
def test_lxd_init(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
mock_subp.which.return_value = True
m_maybe_clean.return_value = None
cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
@@ -50,7 +41,7 @@ class TestLxd(t_help.CiTestCase):
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.subp")
def test_lxd_install(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
cc.distro = mock.MagicMock()
mock_subp.which.return_value = None
cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
@@ -64,7 +55,7 @@ class TestLxd(t_help.CiTestCase):
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.subp")
def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
cc.distro = mock.MagicMock()
cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, [])
self.assertFalse(cc.distro.install_packages.called)
@@ -74,7 +65,7 @@ class TestLxd(t_help.CiTestCase):
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.subp")
def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
cc.distro = mock.MagicMock()
cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, [])
self.assertFalse(cc.distro.install_packages.called)
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
index 6891e15f..9cda6fbe 100644
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ b/tests/unittests/test_handler/test_handler_mcollective.py
@@ -1,11 +1,4 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import (cloud, distros, helpers, util)
-from cloudinit.config import cc_mcollective
-from cloudinit.sources import DataSourceNoCloud
-
-from cloudinit.tests import helpers as t_help
-
import configobj
import logging
import os
@@ -13,6 +6,12 @@ import shutil
import tempfile
from io import BytesIO
+from cloudinit import (util)
+from cloudinit.config import cc_mcollective
+from cloudinit.tests import helpers as t_help
+
+from tests.unittests.util import get_cloud
+
LOG = logging.getLogger(__name__)
@@ -128,18 +127,10 @@ class TestConfig(t_help.FilesystemMockingTestCase):
class TestHandler(t_help.TestCase):
- def _get_cloud(self, distro):
- cls = distros.fetch(distro)
- paths = helpers.Paths({})
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
@t_help.mock.patch("cloudinit.config.cc_mcollective.subp")
@t_help.mock.patch("cloudinit.config.cc_mcollective.util")
def test_mcollective_install(self, mock_util, mock_subp):
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
cc.distro = t_help.mock.MagicMock()
mock_util.load_file.return_value = b""
mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}}
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
index 6b9c8377..b9e2ba57 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -1,17 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
+import os
+import shutil
+from functools import partial
+from os.path import dirname
+from cloudinit import (helpers, util)
from cloudinit.config import cc_ntp
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
-
from cloudinit.tests.helpers import (
CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
+from tests.unittests.util import get_cloud
-import copy
-import os
-from os.path import dirname
-import shutil
NTP_TEMPLATE = """\
## template: jinja
@@ -39,16 +39,11 @@ class TestNtp(FilesystemMockingTestCase):
self.m_snappy.return_value = False
self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
-
- def _get_cloud(self, distro, sys_cfg=None):
- self.new_root = self.reRoot(root=self.new_root)
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- if not sys_cfg:
- sys_cfg = {}
- mydist = cls(distro, sys_cfg, paths)
- myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, paths)
- return cloud.Cloud(myds, paths, sys_cfg, mydist, None)
+ self.new_root = self.reRoot()
+ self._get_cloud = partial(
+ get_cloud,
+ paths=helpers.Paths({'templates_dir': self.new_root})
+ )
def _get_template_path(self, template_name, distro, basepath=None):
# ntp.conf.{distro} -> ntp.conf.debian.tmpl
@@ -112,22 +107,6 @@ class TestNtp(FilesystemMockingTestCase):
check_exe='timesyncd')
install_func.assert_called_once_with([])
- @mock.patch("cloudinit.config.cc_ntp.subp")
- def test_reload_ntp_defaults(self, mock_subp):
- """Test service is restarted/reloaded (defaults)"""
- service = 'ntp_service_name'
- cmd = ['service', service, 'restart']
- cc_ntp.reload_ntp(service)
- mock_subp.subp.assert_called_with(cmd, capture=True)
-
- @mock.patch("cloudinit.config.cc_ntp.subp")
- def test_reload_ntp_systemd(self, mock_subp):
- """Test service is restarted/reloaded (systemd)"""
- service = 'ntp_service_name'
- cc_ntp.reload_ntp(service, systemd=True)
- cmd = ['systemctl', 'reload-or-restart', service]
- mock_subp.subp.assert_called_with(cmd, capture=True)
-
def test_ntp_rename_ntp_conf(self):
"""When NTP_CONF exists, rename_ntp moves it."""
ntpconf = self.tmp_path("ntp.conf", self.new_root)
@@ -488,10 +467,11 @@ class TestNtp(FilesystemMockingTestCase):
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
self.assertEqual(0, m_select.call_count)
+ @mock.patch("cloudinit.distros.subp")
@mock.patch("cloudinit.config.cc_ntp.subp")
@mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
@mock.patch("cloudinit.distros.Distro.uses_systemd")
- def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp):
+ def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp):
"""Test enabled config renders template, and restarts service """
cfg = {'ntp': {'enabled': True}}
for distro in cc_ntp.distros:
@@ -509,7 +489,7 @@ class TestNtp(FilesystemMockingTestCase):
if distro == 'alpine':
uses_systemd = False
- expected_service_call = ['service', service_name, 'restart']
+ expected_service_call = ['rc-service', service_name, 'restart']
# _mock_ntp_client_config call above did not specify a client
# value and so it defaults to "ntp" which on Alpine Linux only
# supports servers and not pools.
@@ -525,7 +505,7 @@ class TestNtp(FilesystemMockingTestCase):
m_util.is_false.return_value = util.is_false(
cfg['ntp']['enabled'])
cc_ntp.handle('notimportant', cfg, mycloud, None, None)
- m_subp.subp.assert_called_with(
+ m_dsubp.subp.assert_called_with(
expected_service_call, capture=True)
self.assertEqual(expected_content, util.load_file(confpath))
@@ -673,9 +653,8 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(result))
m_which.assert_has_calls([])
- @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
@mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
- def test_ntp_user_provided_config_with_template(self, m_install, m_reload):
+ def test_ntp_user_provided_config_with_template(self, m_install):
custom = r'\n#MyCustomTemplate'
user_template = NTP_TEMPLATE + custom
confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf')
@@ -702,11 +681,10 @@ class TestNtp(FilesystemMockingTestCase):
util.load_file(confpath))
@mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation')
- @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
@mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
@mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
def test_ntp_user_provided_config_template_only(self, m_select, m_install,
- m_reload, m_schema):
+ m_schema):
"""Test custom template for default client"""
custom = r'\n#MyCustomTemplate'
user_template = NTP_TEMPLATE + custom
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
index b7891ab4..8d99f535 100644
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ b/tests/unittests/test_handler/test_handler_puppet.py
@@ -1,13 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import textwrap
from cloudinit.config import cc_puppet
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
+from cloudinit import util
from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock
-import logging
-import textwrap
-
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -65,19 +64,13 @@ class TestPuppetHandle(CiTestCase):
self.conf = self.tmp_path('puppet.conf')
self.csr_attributes_path = self.tmp_path(
'csr_attributes.yaml')
-
- def _get_cloud(self, distro):
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- return cloud.Cloud(myds, paths, {}, mydist, None)
+ self.cloud = get_cloud()
def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
"""Cloud-config containing no 'puppet' key is skipped."""
- mycloud = self._get_cloud('ubuntu')
+
cfg = {}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
self.assertIn(
"no 'puppet' configuration found", self.logs.getvalue())
self.assertEqual(0, m_auto.call_count)
@@ -85,9 +78,9 @@ class TestPuppetHandle(CiTestCase):
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
"""Cloud-config 'puppet' configuration starts puppet."""
- mycloud = self._get_cloud('ubuntu')
+
cfg = {'puppet': {'install': False}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
self.assertEqual(1, m_auto.call_count)
self.assertIn(
[mock.call(['service', 'puppet', 'start'], capture=False)],
@@ -96,34 +89,34 @@ class TestPuppetHandle(CiTestCase):
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
"""Cloud-config empty 'puppet' configuration installs latest puppet."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {'puppet': {}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
self.assertEqual(
[mock.call(('puppet', None))],
- mycloud.distro.install_packages.call_args_list)
+ self.cloud.distro.install_packages.call_args_list)
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
def test_puppet_config_installs_puppet_on_true(self, m_subp, _):
"""Cloud-config with 'puppet' key installs when 'install' is True."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {'puppet': {'install': True}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
self.assertEqual(
[mock.call(('puppet', None))],
- mycloud.distro.install_packages.call_args_list)
+ self.cloud.distro.install_packages.call_args_list)
@mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio'."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {'puppet': {'install': True, 'install_type': 'aio'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
m_aio.assert_called_with(
cc_puppet.AIO_INSTALL_URL,
None, None, True)
@@ -134,11 +127,11 @@ class TestPuppetHandle(CiTestCase):
m_subp, m_aio, _):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio' and 'version' is specified."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {'puppet': {'install': True,
'version': '6.24.0', 'install_type': 'aio'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
m_aio.assert_called_with(
cc_puppet.AIO_INSTALL_URL,
'6.24.0', None, True)
@@ -150,11 +143,11 @@ class TestPuppetHandle(CiTestCase):
m_aio, _):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio' and 'collection' is specified."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {'puppet': {'install': True,
'collection': 'puppet6', 'install_type': 'aio'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
m_aio.assert_called_with(
cc_puppet.AIO_INSTALL_URL,
None, 'puppet6', True)
@@ -166,13 +159,13 @@ class TestPuppetHandle(CiTestCase):
m_aio, _):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio' and 'aio_install_url' is specified."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {'puppet':
{'install': True,
'aio_install_url': 'http://test.url/path/to/script.sh',
'install_type': 'aio'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
m_aio.assert_called_with(
'http://test.url/path/to/script.sh', None, None, True)
@@ -183,11 +176,11 @@ class TestPuppetHandle(CiTestCase):
m_aio, _):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio' and no cleanup."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {'puppet': {'install': True,
'cleanup': False, 'install_type': 'aio'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
m_aio.assert_called_with(
cc_puppet.AIO_INSTALL_URL,
None, None, False)
@@ -195,13 +188,13 @@ class TestPuppetHandle(CiTestCase):
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
def test_puppet_config_installs_puppet_version(self, m_subp, _):
"""Cloud-config 'puppet' configuration can specify a version."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {'puppet': {'version': '3.8'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
self.assertEqual(
[mock.call(('puppet', '3.8'))],
- mycloud.distro.install_packages.call_args_list)
+ self.cloud.distro.install_packages.call_args_list)
@mock.patch('cloudinit.config.cc_puppet.get_config_value')
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
@@ -213,14 +206,14 @@ class TestPuppetHandle(CiTestCase):
return self.conf
m_default.side_effect = _fake_get_config_value
- mycloud = self._get_cloud('ubuntu')
+
cfg = {
'puppet': {
'conf': {'agent': {'server': 'puppetserver.example.org'}}}}
util.write_file(
self.conf, '[agent]\nserver = origpuppet\nother = 3')
- mycloud.distro = mock.MagicMock()
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ self.cloud.distro = mock.MagicMock()
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
content = util.load_file(self.conf)
expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n'
self.assertEqual(expected, content)
@@ -236,8 +229,8 @@ class TestPuppetHandle(CiTestCase):
return self.csr_attributes_path
m_default.side_effect = _fake_get_config_value
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
+
+ self.cloud.distro = mock.MagicMock()
cfg = {
'puppet': {
'csr_attributes': {
@@ -254,7 +247,7 @@ class TestPuppetHandle(CiTestCase):
}
}
}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
content = util.load_file(self.csr_attributes_path)
expected = textwrap.dedent("""\
custom_attributes:
@@ -269,22 +262,44 @@ class TestPuppetHandle(CiTestCase):
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto):
"""Run puppet with default args if 'exec' is set to True."""
- mycloud = self._get_cloud('ubuntu')
+
cfg = {'puppet': {'exec': True}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
self.assertEqual(1, m_auto.call_count)
self.assertIn(
[mock.call(['puppet', 'agent', '--test'], capture=False)],
m_subp.call_args_list)
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_starts_puppetd(self, m_subp, m_auto):
+        """Start the puppet service by default when 'start_service' is unset."""
+
+ cfg = {'puppet': {}}
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(['service', 'puppet', 'start'], capture=False)],
+ m_subp.call_args_list)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+ def test_puppet_skips_puppetd(self, m_subp, m_auto):
+        """Do not start the puppet service when 'start_service' is False."""
+
+ cfg = {'puppet': {'start_service': False}}
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+ self.assertEqual(0, m_auto.call_count)
+ self.assertNotIn(
+ [mock.call(['service', 'puppet', 'start'], capture=False)],
+ m_subp.call_args_list)
+
+ @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
def test_puppet_runs_puppet_with_args_list_if_requested(self,
m_subp, m_auto):
"""Run puppet with 'exec_args' list if 'exec' is set to True."""
- mycloud = self._get_cloud('ubuntu')
+
cfg = {'puppet': {'exec': True, 'exec_args': [
'--onetime', '--detailed-exitcodes']}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
self.assertEqual(1, m_auto.call_count)
self.assertIn(
[mock.call(
@@ -296,10 +311,10 @@ class TestPuppetHandle(CiTestCase):
def test_puppet_runs_puppet_with_args_string_if_requested(self,
m_subp, m_auto):
"""Run puppet with 'exec_args' string if 'exec' is set to True."""
- mycloud = self._get_cloud('ubuntu')
+
cfg = {'puppet': {'exec': True,
'exec_args': '--onetime --detailed-exitcodes'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
+ cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
self.assertEqual(1, m_auto.call_count)
self.assertIn(
[mock.call(
diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py
index 73237d68..672e8093 100644
--- a/tests/unittests/test_handler/test_handler_runcmd.py
+++ b/tests/unittests/test_handler/test_handler_runcmd.py
@@ -1,15 +1,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+import stat
+from unittest.mock import patch
from cloudinit.config.cc_runcmd import handle, schema
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, subp, util)
+from cloudinit import (helpers, subp, util)
from cloudinit.tests.helpers import (
CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin,
skipUnlessJsonSchema)
-import logging
-import os
-import stat
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -22,34 +23,39 @@ class TestRuncmd(FilesystemMockingTestCase):
super(TestRuncmd, self).setUp()
self.subp = subp.subp
self.new_root = self.tmp_dir()
-
- def _get_cloud(self, distro):
self.patchUtils(self.new_root)
- paths = helpers.Paths({'scripts': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- paths.datasource = myds
- return cloud.Cloud(myds, paths, {}, mydist, None)
+ self.paths = helpers.Paths({'scripts': self.new_root})
def test_handler_skip_if_no_runcmd(self):
"""When the provided config doesn't contain runcmd, skip it."""
cfg = {}
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud(paths=self.paths)
handle('notimportant', cfg, mycloud, LOG, None)
self.assertIn(
"Skipping module named notimportant, no 'runcmd' key",
self.logs.getvalue())
+ @patch('cloudinit.util.shellify')
+ def test_runcmd_shellify_fails(self, cls):
+ """When shellify fails throw exception"""
+        """When shellify fails, an exception is raised by handle."""
+ valid_config = {'runcmd': ['echo 42']}
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ with self.allow_subp(['/bin/sh']):
+ handle('cc_runcmd', valid_config, cc, LOG, None)
+ self.assertIn("Failed to shellify", str(cm.exception))
+
def test_handler_invalid_command_set(self):
"""Commands which can't be converted to shell will raise errors."""
invalid_config = {'runcmd': 1}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle('cc_runcmd', invalid_config, cc, LOG, [])
self.assertIn(
'Failed to shellify 1 into file'
' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd',
- self.logs.getvalue())
+ str(cm.exception))
@skipUnlessJsonSchema()
def test_handler_schema_validation_warns_non_array_type(self):
@@ -59,12 +65,13 @@ class TestRuncmd(FilesystemMockingTestCase):
invalid content.
"""
invalid_config = {'runcmd': 1}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle('cc_runcmd', invalid_config, cc, LOG, [])
self.assertIn(
'Invalid config:\nruncmd: 1 is not of type \'array\'',
self.logs.getvalue())
- self.assertIn('Failed to shellify', self.logs.getvalue())
+ self.assertIn('Failed to shellify', str(cm.exception))
@skipUnlessJsonSchema()
def test_handler_schema_validation_warns_non_array_item_type(self):
@@ -75,8 +82,9 @@ class TestRuncmd(FilesystemMockingTestCase):
"""
invalid_config = {
'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle('cc_runcmd', invalid_config, cc, LOG, [])
expected_warnings = [
'runcmd.1: 20 is not valid under any of the given schemas',
'runcmd.3: {\'a\': \'n\'} is not valid under any of the given'
@@ -85,12 +93,12 @@ class TestRuncmd(FilesystemMockingTestCase):
logs = self.logs.getvalue()
for warning in expected_warnings:
self.assertIn(warning, logs)
- self.assertIn('Failed to shellify', logs)
+ self.assertIn('Failed to shellify', str(cm.exception))
def test_handler_write_valid_runcmd_schema_to_file(self):
"""Valid runcmd schema is written to a runcmd shell script."""
valid_config = {'runcmd': [['ls', '/']]}
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud(paths=self.paths)
handle('cc_runcmd', valid_config, cc, LOG, [])
runcmd_file = os.path.join(
self.new_root,
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index 85167f19..2ab153d2 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -7,24 +7,17 @@
# Based on test_handler_set_hostname.py
#
# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_seed_random
-
import gzip
+import logging
import tempfile
from io import BytesIO
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
from cloudinit import subp
from cloudinit import util
-
-from cloudinit.sources import DataSourceNone
-
+from cloudinit.config import cc_seed_random
from cloudinit.tests import helpers as t_help
-import logging
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -66,15 +59,6 @@ class TestRandomSeed(t_help.TestCase):
gz_fh.close()
return contents.getvalue()
- def _get_cloud(self, distro, metadata=None):
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- ubuntu_distro = cls(distro, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, ubuntu_distro, paths)
- if metadata:
- ds.metadata = metadata
- return cloud.Cloud(ds, paths, {}, ubuntu_distro, None)
-
def test_append_random(self):
cfg = {
'random_seed': {
@@ -82,7 +66,7 @@ class TestRandomSeed(t_help.TestCase):
'data': 'tiny-tim-was-here',
}
}
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, [])
contents = util.load_file(self._seed_file)
self.assertEqual("tiny-tim-was-here", contents)
@@ -96,7 +80,7 @@ class TestRandomSeed(t_help.TestCase):
}
}
self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg,
- self._get_cloud('ubuntu'), LOG, [])
+ get_cloud('ubuntu'), LOG, [])
def test_append_random_gzip(self):
data = self._compress(b"tiny-toe")
@@ -107,7 +91,7 @@ class TestRandomSeed(t_help.TestCase):
'encoding': 'gzip',
}
}
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, [])
contents = util.load_file(self._seed_file)
self.assertEqual("tiny-toe", contents)
@@ -120,7 +104,7 @@ class TestRandomSeed(t_help.TestCase):
'encoding': 'gz',
}
}
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, [])
contents = util.load_file(self._seed_file)
self.assertEqual("big-toe", contents)
@@ -133,7 +117,7 @@ class TestRandomSeed(t_help.TestCase):
'encoding': 'base64',
}
}
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, [])
contents = util.load_file(self._seed_file)
self.assertEqual("bubbles", contents)
@@ -146,7 +130,7 @@ class TestRandomSeed(t_help.TestCase):
'encoding': 'b64',
}
}
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
+ cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, [])
contents = util.load_file(self._seed_file)
self.assertEqual("kit-kat", contents)
@@ -157,13 +141,13 @@ class TestRandomSeed(t_help.TestCase):
'data': 'tiny-tim-was-here',
}
}
- c = self._get_cloud('ubuntu', {'random_seed': '-so-was-josh'})
+ c = get_cloud('ubuntu', metadata={'random_seed': '-so-was-josh'})
cc_seed_random.handle('test', cfg, c, LOG, [])
contents = util.load_file(self._seed_file)
self.assertEqual('tiny-tim-was-here-so-was-josh', contents)
def test_seed_command_provided_and_available(self):
- c = self._get_cloud('ubuntu', {})
+ c = get_cloud('ubuntu')
self.whichdata = {'pollinate': '/usr/bin/pollinate'}
cfg = {'random_seed': {'command': ['pollinate', '-q']}}
cc_seed_random.handle('test', cfg, c, LOG, [])
@@ -172,7 +156,7 @@ class TestRandomSeed(t_help.TestCase):
self.assertIn(['pollinate', '-q'], subp_args)
def test_seed_command_not_provided(self):
- c = self._get_cloud('ubuntu', {})
+ c = get_cloud('ubuntu')
self.whichdata = {}
cc_seed_random.handle('test', {}, c, LOG, [])
@@ -180,7 +164,7 @@ class TestRandomSeed(t_help.TestCase):
self.assertFalse(self.subp_called)
def test_unavailable_seed_command_and_required_raises_error(self):
- c = self._get_cloud('ubuntu', {})
+ c = get_cloud('ubuntu')
self.whichdata = {}
cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'],
'command_required': True}}
@@ -188,7 +172,7 @@ class TestRandomSeed(t_help.TestCase):
'test', cfg, c, LOG, [])
def test_seed_command_and_required(self):
- c = self._get_cloud('ubuntu', {})
+ c = get_cloud('ubuntu')
self.whichdata = {'foo': 'foo'}
cfg = {'random_seed': {'command_required': True, 'command': ['foo']}}
cc_seed_random.handle('test', cfg, c, LOG, [])
@@ -196,7 +180,7 @@ class TestRandomSeed(t_help.TestCase):
self.assertIn(['foo'], [f['args'] for f in self.subp_called])
def test_file_in_environment_for_command(self):
- c = self._get_cloud('ubuntu', {})
+ c = get_cloud('ubuntu')
self.whichdata = {'foo': 'foo'}
cfg = {'random_seed': {'command_required': True, 'command': ['foo'],
'file': self._seed_file}}
diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py
index 50c45363..77cdb0c2 100644
--- a/tests/unittests/test_handler/test_handler_timezone.py
+++ b/tests/unittests/test_handler/test_handler_timezone.py
@@ -6,21 +6,19 @@
from cloudinit.config import cc_timezone
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
from cloudinit import util
-from cloudinit.sources import DataSourceNoCloud
-from cloudinit.tests import helpers as t_help
-
-from configobj import ConfigObj
import logging
import shutil
import tempfile
+from configobj import ConfigObj
from io import BytesIO
+from cloudinit.tests import helpers as t_help
+
+from tests.unittests.util import get_cloud
+
LOG = logging.getLogger(__name__)
@@ -29,25 +27,15 @@ class TestTimezone(t_help.FilesystemMockingTestCase):
super(TestTimezone, self).setUp()
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro):
self.patchUtils(self.new_root)
self.patchOS(self.new_root)
- paths = helpers.Paths({})
-
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
def test_set_timezone_sles(self):
cfg = {
'timezone': 'Tatooine/Bestine',
}
- cc = self._get_cloud('sles')
+ cc = get_cloud('sles')
# Create a dummy timezone file
dummy_contents = '0123456789abcdefgh'
diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py
index 727681d3..0af92805 100644
--- a/tests/unittests/test_handler/test_handler_write_files.py
+++ b/tests/unittests/test_handler/test_handler_write_files.py
@@ -189,6 +189,19 @@ class TestWriteFiles(FilesystemMockingTestCase):
len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum))
self.assertEqual(len(expected), flen_expected)
+ def test_deferred(self):
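+        """A write_files entry marked 'defer': True is skipped by cc_write_files."""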
+ self.patchUtils(self.tmp)
+ file_path = '/tmp/deferred.file'
+ config = {
+ 'write_files': [
+ {'path': file_path, 'defer': True}
+ ]
+ }
+ cc = self.tmp_cloud('ubuntu')
+ handle('cc_write_file', config, cc, LOG, [])
+ with self.assertRaises(FileNotFoundError):
+ util.load_file(file_path)
+
class TestDecodePerms(CiTestCase):
diff --git a/tests/unittests/test_handler/test_handler_write_files_deferred.py b/tests/unittests/test_handler/test_handler_write_files_deferred.py
new file mode 100644
index 00000000..57b6934a
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_write_files_deferred.py
@@ -0,0 +1,77 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import tempfile
+import shutil
+
+from cloudinit.config.cc_write_files_deferred import (handle)
+from .test_handler_write_files import (VALID_SCHEMA)
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.tests.helpers import (
+ CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
+
+LOG = logging.getLogger(__name__)
+
+
+@skipUnlessJsonSchema()
+@mock.patch('cloudinit.config.cc_write_files_deferred.write_files')
+class TestWriteFilesDeferredSchema(CiTestCase):
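+    """Schema validation for the 'defer' key: it must be a boolean."""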
+
+ with_logs = True
+
+ def test_schema_validation_warns_invalid_value(self,
+ m_write_files_deferred):
+ """If 'defer' is defined, it must be of type 'bool'."""
+
+ valid_config = {
+ 'write_files': [
+ {**VALID_SCHEMA.get('write_files')[0], 'defer': True}
+ ]
+ }
+
+ invalid_config = {
+ 'write_files': [
+ {**VALID_SCHEMA.get('write_files')[0], 'defer': str('no')}
+ ]
+ }
+
+ cc = self.tmp_cloud('ubuntu')
+ handle('cc_write_files_deferred', valid_config, cc, LOG, [])
+ self.assertNotIn('Invalid config:', self.logs.getvalue())
+ handle('cc_write_files_deferred', invalid_config, cc, LOG, [])
+ self.assertIn('Invalid config:', self.logs.getvalue())
+ self.assertIn("defer: 'no' is not of type 'boolean'",
+ self.logs.getvalue())
+
+
+class TestWriteFilesDeferred(FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestWriteFilesDeferred, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
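+    # cc_write_files_deferred only writes entries marked 'defer': True; the
+    # rest were already handled by cc_write_files earlier in boot, so the
+    # non-deferred entry below must be filtered out (hence FileNotFoundError).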
+ def test_filtering_deferred_files(self):
+ self.patchUtils(self.tmp)
+ expected = "hello world\n"
+ config = {
+ 'write_files': [
+ {
+ 'path': '/tmp/deferred.file',
+ 'defer': True,
+ 'content': expected
+ },
+ {'path': '/tmp/not_deferred.file'}
+ ]
+ }
+ cc = self.tmp_cloud('ubuntu')
+ handle('cc_write_files_deferred', config, cc, LOG, [])
+ self.assertEqual(util.load_file('/tmp/deferred.file'), expected)
+ with self.assertRaises(FileNotFoundError):
+ util.load_file('/tmp/not_deferred.file')
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index 15aa77bb..1dae223d 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -10,7 +10,6 @@ from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from copy import copy
import itertools
-import os
import pytest
from pathlib import Path
from textwrap import dedent
@@ -35,8 +34,10 @@ class GetSchemaTest(CiTestCase):
'cc_ubuntu_advantage',
'cc_ubuntu_drivers',
'cc_write_files',
+ 'cc_write_files_deferred',
'cc_zypper_add_repo',
- 'cc_chef'
+ 'cc_chef',
+ 'cc_install_hotplug',
],
[subschema['id'] for subschema in schema['allOf']])
self.assertEqual('cloud-config-schema', schema['id'])
@@ -109,11 +110,11 @@ class ValidateCloudConfigSchemaTest(CiTestCase):
def test_validateconfig_schema_honors_formats(self):
"""With strict True, validate_cloudconfig_schema errors on format."""
schema = {
- 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
+ 'properties': {'p1': {'type': 'string', 'format': 'email'}}}
with self.assertRaises(SchemaValidationError) as context_mgr:
validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True)
self.assertEqual(
- "Cloud config schema errors: p1: '-1' is not a 'hostname'",
+ "Cloud config schema errors: p1: '-1' is not a 'email'",
str(context_mgr.exception))
@@ -190,12 +191,12 @@ class ValidateCloudConfigFileTest(CiTestCase):
def test_validateconfig_file_sctrictly_validates_schema(self):
"""validate_cloudconfig_file raises errors on invalid schema."""
schema = {
- 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
- write_file(self.config_file, '#cloud-config\np1: "-1"')
+ 'properties': {'p1': {'type': 'string', 'format': 'string'}}}
+ write_file(self.config_file, '#cloud-config\np1: -1')
with self.assertRaises(SchemaValidationError) as context_mgr:
validate_cloudconfig_file(self.config_file, schema)
self.assertEqual(
- "Cloud config schema errors: p1: '-1' is not a 'hostname'",
+ "Cloud config schema errors: p1: -1 is not of type 'string'",
str(context_mgr.exception))
@@ -493,46 +494,6 @@ class TestMain:
assert expected == err
-class CloudTestsIntegrationTest(CiTestCase):
- """Validate all cloud-config yaml schema provided in integration tests.
-
- It is less expensive to have unittests validate schema of all cloud-config
- yaml provided to integration tests, than to run an integration test which
- raises Warnings or errors on invalid cloud-config schema.
- """
-
- @skipUnlessJsonSchema()
- def test_all_integration_test_cloud_config_schema(self):
- """Validate schema of cloud_tests yaml files looking for warnings."""
- schema = get_schema()
- testsdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
- integration_testdir = os.path.sep.join(
- [testsdir, 'cloud_tests', 'testcases'])
- errors = []
-
- yaml_files = []
- for root, _dirnames, filenames in os.walk(integration_testdir):
- yaml_files.extend([os.path.join(root, f)
- for f in filenames if f.endswith(".yaml")])
- self.assertTrue(len(yaml_files) > 0)
-
- for filename in yaml_files:
- test_cfg = safe_load(open(filename))
- cloud_config = test_cfg.get('cloud_config')
- if cloud_config:
- cloud_config = safe_load(
- cloud_config.replace("#cloud-config\n", ""))
- try:
- validate_cloudconfig_schema(
- cloud_config, schema, strict=True)
- except SchemaValidationError as e:
- errors.append(
- '{0}: {1}'.format(
- filename, e))
- if errors:
- raise AssertionError(', '.join(errors))
-
-
def _get_schema_doc_examples():
examples_dir = Path(
cloudinit.__file__).parent.parent / 'doc' / 'examples'
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index fc77b11e..094450b4 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1205,6 +1205,13 @@ NETWORK_CONFIGS = {
USERCTL=no
"""),
},
+ 'expected_networkd': textwrap.dedent("""\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ IPv6AcceptRA=True
+ """).rstrip(' '),
},
'dhcpv6_reject_ra': {
'expected_eni': textwrap.dedent("""\
@@ -1260,6 +1267,13 @@ NETWORK_CONFIGS = {
USERCTL=no
"""),
},
+ 'expected_networkd': textwrap.dedent("""\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ IPv6AcceptRA=False
+ """).rstrip(' '),
},
'ipv6_slaac': {
'expected_eni': textwrap.dedent("""\
@@ -5203,6 +5217,66 @@ class TestNetworkdRoundTrip(CiTestCase):
self.compare_dicts(actual, expected)
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_accept_ra_config_v1(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
+ files = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v1']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_accept_ra_config_v2(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
+ files = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_reject_ra_config_v1(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
+ files = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v1']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_reject_ra_config_v2(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
+ files = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
class TestRenderersSelect:
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index 38f2edf2..f63a8b74 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -118,8 +118,8 @@ NETWORK_MANAGER_AVAILABLE_CALLS = [
]
NETWORKD_AVAILABLE_CALLS = [
- (('ip',), {'search': ['/usr/bin', '/bin'], 'target': None}),
- (('systemctl',), {'search': ['/usr/bin', '/bin'], 'target': None}),
+ (('ip',), {'search': ['/usr/sbin', '/bin'], 'target': None}),
+ (('systemctl',), {'search': ['/usr/sbin', '/bin'], 'target': None}),
]
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 466d472b..e339e132 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -1,8 +1,8 @@
import os
-import yaml
import cloudinit.net
import cloudinit.net.network_state
+from cloudinit import safeyaml
from cloudinit.tests.helpers import (CiTestCase, mock, readResource, dir2dict)
@@ -65,7 +65,7 @@ class TestFreeBSDRoundTrip(CiTestCase):
entry = {
'yaml': V1,
}
- network_config = yaml.load(entry['yaml'])
+ network_config = safeyaml.load(entry['yaml'])
ns = cloudinit.net.network_state.parse_net_config_data(network_config)
files = self._render_and_read(state=ns)
assert files == {
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index a66788bf..08e20050 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -525,6 +525,14 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
self.assertEqual([self.pwauth], result)
self.check_line(lines[-1], self.pwauth, "no")
+ def test_option_without_value(self):
+ """Implementation only accepts key-value pairs."""
+ extended_exlines = self.exlines.copy()
+ denyusers_opt = "DenyUsers"
+ extended_exlines.append(denyusers_opt)
+ lines = ssh_util.parse_ssh_config_lines(list(extended_exlines))
+ self.assertNotIn(denyusers_opt, str(lines))
+
def test_single_option_updated(self):
"""A single update should have change made and line updated."""
opt, val = ("UsePAM", "no")
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 2290cab7..bc30c90b 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -604,7 +604,7 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
class TestMessageFromString(helpers.TestCase):
def test_unicode_not_messed_up(self):
- roundtripped = util.message_from_string(u'\n').as_string()
+ roundtripped = util.message_from_string('\n').as_string()
self.assertNotIn('\x00', roundtripped)
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
new file mode 100644
index 00000000..383f5f5c
--- /dev/null
+++ b/tests/unittests/util.py
@@ -0,0 +1,143 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from cloudinit import cloud, distros, helpers
+from cloudinit.sources.DataSourceNone import DataSourceNone
+
+
+def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None):
+ """Obtain a "cloud" that can be used for testing.
+
+ Modules take a 'cloud' parameter to call into things that are
+ datasource/distro specific. In most cases, the specifics of this cloud
+ implementation aren't needed to test the module, so provide a fake
+ datasource/distro with stubbed calls to methods that may attempt to
+ read/write files or shell out. If a specific distro is needed, it can
+ be passed in as the distro parameter.
+ """
+ paths = paths or helpers.Paths({})
+ sys_cfg = sys_cfg or {}
+ cls = distros.fetch(distro) if distro else TestingDistro
+ mydist = cls(distro, sys_cfg, paths)
+ myds = DataSourceTesting(sys_cfg, mydist, paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ if paths:
+ paths.datasource = myds
+ return cloud.Cloud(myds, paths, sys_cfg, mydist, None)
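+# Illustrative usage sketch (cc_runcmd stands in for any config module whose
+# handle() takes a cloud, and LOG for any logging.Logger):
+#
+#   cc = get_cloud()          # stubbed TestingDistro + DataSourceTesting
+#   cc = get_cloud('ubuntu')  # or fetch a concrete distro by name
+#   cc_runcmd.handle('cc_runcmd', {'runcmd': [['ls', '/']]}, cc, LOG, [])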
+
+
+def abstract_to_concrete(abclass):
+ """Takes an abstract class and returns a concrete version of it."""
+ class concreteCls(abclass):
+ pass
+ concreteCls.__abstractmethods__ = frozenset()
+ return type('DummyConcrete' + abclass.__name__, (concreteCls,), {})
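+# For example, abstract_to_concrete(distros.Distro) returns an instantiable
+# 'DummyConcreteDistro' class, useful when a test only needs base-class
+# behavior.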
+
+
+class DataSourceTesting(DataSourceNone):
+ def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
+ return 'hostname'
+
+ def persist_instance_data(self):
+ return True
+
+ @property
+ def fallback_interface(self):
+ return None
+
+ @property
+ def cloud_name(self):
+ return 'testing'
+
+
+class TestingDistro(distros.Distro):
+    # TestingDistro is here to test base Distro class implementations
+    def __init__(self, name="testingdistro", cfg=None, paths=None):
+        if not cfg:
+            cfg = {}
+        if not paths:
+            paths = {}
+        super(TestingDistro, self).__init__(name, cfg, paths)
+
+    def install_packages(self, pkglist):
+        pass
+
+    def set_hostname(self, hostname, fqdn=None):
+        pass
+
+    def uses_systemd(self):
+        return True
+
+    def get_primary_arch(self):
+        return 'i386'
+
+    def get_package_mirror_info(self, arch=None, data_source=None):
+        pass
+
+    def apply_network(self, settings, bring_up=True):
+        return False
+
+    def generate_fallback_config(self):
+        return {}
+
+    def apply_network_config(self, netconfig, bring_up=False) -> bool:
+        return False
+
+    def apply_network_config_names(self, netconfig):
+        pass
+
+    def apply_locale(self, locale, out_fn=None):
+        pass
+
+    def set_timezone(self, tz):
+        pass
+
+    def _read_hostname(self, filename, default=None):
+        raise NotImplementedError()
+
+    def _write_hostname(self, hostname, filename):
+        raise NotImplementedError()
+
+    def _read_system_hostname(self):
+        raise NotImplementedError()
+
+    def update_hostname(self, hostname, fqdn, prev_hostname_fn):
+        pass
+
+    def update_etc_hosts(self, hostname, fqdn):
+        pass
+
+    def add_user(self, name, **kwargs):
+        pass
+
+    def add_snap_user(self, name, **kwargs):
+        return 'snap_user'
+
+    def create_user(self, name, **kwargs):
+        return True
+
+    def lock_passwd(self, name):
+        pass
+
+    def expire_passwd(self, user):
+        pass
+
+    def set_passwd(self, user, passwd, hashed=False):
+        return True
+
+    def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
+        pass
+
+    def write_sudo_rules(self, user, rules, sudo_file=None):
+        pass
+
+    def create_group(self, name, members=None):
+        pass
+
+    def shutdown_command(self, *, mode, delay, message):
+        pass
+
+    def package_command(self, command, args=None, pkgs=None):
+        pass
+
+    def update_package_sources(self):
+        return (True, "yay")
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 4fa108aa..fac3fcec 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -3,6 +3,7 @@ ajmyyra
akutz
AlexBaranowski
Aman306
+andgein
andrewbogott
andrewlukoshko
antonyc
@@ -13,7 +14,9 @@ BirknerAlex
bmhughes
candlerb
cawamata
+cclauss
ciprianbadescu
+citrus-it
dankenigsberg
ddymko
dermotbradley
@@ -25,16 +28,21 @@ esposem
GabrielNagy
giggsoff
hamalq
+holmanb
impl
irishgordo
izzyleung
+Jille
+JohnKepplers
johnsonshi
jordimassaguerpla
jqueuniet
jsf9k
+jshen28
klausenbusk
landon912
lucasmoura
+lucendio
lungj
mal
mamercad
@@ -50,19 +58,23 @@ omBratteng
onitake
qubidt
renanrodrigo
+rhansen
riedel
sarahwzadara
slyon
smoser
sshedi
stappersg
+t-8ch
TheRealFalcon
taoyama
timothegenzmer
tnt-dev
tomponline
tsanghan
+vteratipally
Vultaire
WebSpider
xiachen-rh
xnox
+zhuzaifangxuele
diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 69329cb9..9b09d568 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -30,6 +30,7 @@
"rjschwei": "rjschwei",
"tribaal": "chrisglass",
"trstringer": "trstringer",
+ "vlastimil-holer": "vholer",
"vtqanh": "anhvoms",
"xiaofengw": "xiaofengw-vmware"
}
diff --git a/tools/ds-identify b/tools/ds-identify
index f509f566..30d4b0f6 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -1,5 +1,5 @@
#!/bin/sh
-# shellcheck disable=2015,2039,2162,2166
+# shellcheck disable=2015,2039,2162,2166,3043
#
# ds-identify is configured via /etc/cloud/ds-identify.cfg
# or on the kernel command line. It takes the following inputs:
@@ -123,7 +123,7 @@ DS_MAYBE=2
DI_DSNAME=""
# this has to match the built-in list in cloud-init; it is what will
# be searched if no setting is found in config.
-DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
+DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud LXD AltCloud Azure Bigstep \
CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \
OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware"
DI_DSLIST=""
@@ -615,6 +615,7 @@ read_pid1_product_name() {
dmi_chassis_asset_tag_matches() {
is_container && return 1
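+    # $1 below is deliberately left unquoted: callers pass a shell glob
+    # pattern, and the asset tag must be matched against it rather than
+    # compared as a literal string (hence the SC2254 suppressions here).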
+ # shellcheck disable=2254
case "${DI_DMI_CHASSIS_ASSET_TAG}" in
$1) return 0;;
esac
@@ -623,6 +624,7 @@ dmi_chassis_asset_tag_matches() {
dmi_product_name_matches() {
is_container && return 1
+ # shellcheck disable=2254
case "${DI_DMI_PRODUCT_NAME}" in
$1) return 0;;
esac
@@ -631,6 +633,7 @@ dmi_product_name_matches() {
dmi_product_serial_matches() {
is_container && return 1
+ # shellcheck disable=2254
case "${DI_DMI_PRODUCT_SERIAL}" in
$1) return 0;;
esac
@@ -764,7 +767,7 @@ check_config() {
while read line; do
line=${line%%#*}
case "$line" in
- $key:\ *|$key:)
+ $key:\ *|"${key}":)
ret=${line#*:};
ret=${ret# };
found=$((found+1))
@@ -799,6 +802,12 @@ dscheck_MAAS() {
return ${DS_NOT_FOUND}
}
+# The LXD datasource requires an active /dev/lxd/sock
+# https://linuxcontainers.org/lxd/docs/master/dev-lxd
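+# ("[ -S path ]" succeeds only if the path exists and is a socket.)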
+dscheck_LXD() {
+ [ -S /dev/lxd/sock ] && return ${DS_FOUND} || return ${DS_NOT_FOUND}
+}
+
dscheck_NoCloud() {
local fslabel="cidata CIDATA" d=""
case " ${DI_KERNEL_CMDLINE} " in
@@ -812,6 +821,7 @@ dscheck_NoCloud() {
check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
done
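+    # $fslabel is intentionally unquoted: it holds two labels ("cidata"
+    # and "CIDATA") that must be passed as separate arguments.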
+ # shellcheck disable=2086
if has_fs_with_label $fslabel; then
return ${DS_FOUND}
fi
@@ -895,11 +905,16 @@ ovf_vmware_guest_customization() {
# the deployPkg plugin is required to do VMware guest customization
local found="" pkg="" pre="${PATH_ROOT}/usr/lib"
+ local x86="x86_64-linux-gnu" aarch="aarch64-linux-gnu"
local ppath="plugins/vmsvc/libdeployPkgPlugin.so"
for pkg in vmware-tools open-vm-tools; do
if [ -f "$pre/$pkg/$ppath" -o -f "${pre}64/$pkg/$ppath" ]; then
found="$pkg"; break;
fi
+ # search in multiarch dir
+ if [ -f "$pre/$x86/$pkg/$ppath" -o -f "$pre/$aarch/$pkg/$ppath" ]; then
+ found="$pkg"; break;
+ fi
done
[ -n "$found" ] || return 1
# vmware customization is disabled by default
@@ -1239,11 +1254,11 @@ dscheck_AltCloud() {
ctype="${DI_DMI_PRODUCT_NAME}"
fi
case "$ctype" in
- ${match_rhev})
+ "${match_rhev}")
probe_floppy || return ${DS_NOT_FOUND}
dev="/dev/floppy"
;;
- ${match_vsphere})
+ "${match_vsphere}")
block_dev_with_label CDROM || return ${DS_NOT_FOUND}
dev="$_RET"
;;
@@ -1309,6 +1324,7 @@ is_ibm_provisioning() {
msg="config '$pcfg' exists."
is_prov=true
if [ -f "$logf" ]; then
+ # shellcheck disable=3013
if [ "$logf" -nt "$PATH_PROC_1_ENVIRON" ]; then
msg="$msg log '$logf' from current boot."
else
@@ -1324,7 +1340,7 @@ is_ibm_provisioning() {
}
is_ibm_cloud() {
- cached "${_IS_IBM_CLOUD}" && return ${_IS_IBM_CLOUD}
+ cached "${_IS_IBM_CLOUD}" && return "${_IS_IBM_CLOUD}"
local ret=1
if [ "$DI_VIRT" = "xen" ]; then
if is_ibm_provisioning; then
@@ -1640,10 +1656,10 @@ parse_policy() {
for tok in "$@"; do
val=${tok#*=}
case "$tok" in
- $DI_ENABLED|$DI_DISABLED|search|report) mode=$tok;;
+ "${DI_ENABLED}"|"${DI_DISABLED}"|search|report) mode=$tok;;
found=all|found=first) found=$val;;
maybe=all|maybe=none) maybe=$val;;
- notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;;
+ notfound="${DI_ENABLED}"|notfound="${DI_DISABLED}") notfound=$val;;
found=*)
parse_warn found "$val" "${_def_found}"
found=${_def_found};;
@@ -1724,11 +1740,11 @@ _main() {
fi
case "$DI_MODE" in
- $DI_DISABLED)
+ "${DI_DISABLED}")
debug 1 "mode=$DI_DISABLED. returning $ret_dis"
return $ret_dis
;;
- $DI_ENABLED)
+ "${DI_ENABLED}")
debug 1 "mode=$DI_ENABLED. returning $ret_en"
return $ret_en;;
search|report) :;;
@@ -1768,11 +1784,11 @@ _main() {
$dscheck_fn
ret="$?"
case "$ret" in
- $DS_FOUND)
+ "${DS_FOUND}")
debug 1 "check for '$ds' returned found";
exfound_cfg="${exfound_cfg:+${exfound_cfg}${CR}}${_RET_excfg}"
found="${found} $ds";;
- $DS_MAYBE)
+ "${DS_MAYBE}")
debug 1 "check for '$ds' returned maybe";
exmaybe_cfg="${exmaybe_cfg:+${exmaybe_cfg}${CR}}${_RET_excfg}"
maybe="${maybe} $ds";;
@@ -1811,16 +1827,16 @@ _main() {
local basemsg="No ds found [mode=$DI_MODE, notfound=$DI_ON_NOTFOUND]."
local msg="" ret=3
case "$DI_MODE:$DI_ON_NOTFOUND" in
- report:$DI_DISABLED)
+ report:"${DI_DISABLED}")
msg="$basemsg Would disable cloud-init [$ret_dis]"
ret=$ret_en;;
- report:$DI_ENABLED)
+ report:"${DI_ENABLED}")
msg="$basemsg Would enable cloud-init [$ret_en]"
ret=$ret_en;;
- search:$DI_DISABLED)
+ search:"${DI_DISABLED}")
msg="$basemsg Disabled cloud-init [$ret_dis]"
ret=$ret_dis;;
- search:$DI_ENABLED)
+ search:"${DI_ENABLED}")
msg="$basemsg Enabled cloud-init [$ret_en]"
ret=$ret_en;;
*) error "Unexpected result";;
diff --git a/tools/hook-hotplug b/tools/hook-hotplug
index ced268b3..35bd3da2 100755
--- a/tools/hook-hotplug
+++ b/tools/hook-hotplug
@@ -8,11 +8,7 @@ is_finished() {
[ -e /run/cloud-init/result.json ]
}
-hotplug_enabled() {
- [ "$(cloud-init devel hotplug-hook -s "${SUBSYSTEM}" query)" == "enabled" ]
-}
-
-if is_finished && hotplug_enabled; then
+if is_finished; then
# open cloud-init's hotplug-hook fifo rw
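+# (read-write: a write-only open of a FIFO would block until a reader appears)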
exec 3<>/run/cloud-init/hook-hotplug-cmd
env_params=(
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 30f82521..186d61b7 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -4,8 +4,8 @@ import argparse
import os
import sys
-VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
- "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "photon",
+VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian",
+ "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "openEuler", "photon",
"rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"]
diff --git a/tools/run-pyflakes b/tools/run-flake8
index 179afebe..0021cdb9 100755
--- a/tools/run-pyflakes
+++ b/tools/run-flake8
@@ -2,7 +2,7 @@
CR="
"
-pycheck_dirs=( "cloudinit/" "tests/" "tools/" )
+pycheck_dirs=( "cloudinit/" "tests/" "tools/" "setup.py" )
set -f
if [ $# -eq 0 ]; then
@@ -11,7 +11,7 @@ else
files=( "$@" )
fi
-cmd=( "python3" -m "pyflakes" "${files[@]}" )
+cmd=( "python3" -m "flake8" "${files[@]}" )
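+# Example invocation (the path is illustrative):
+#   ./tools/run-flake8 cloudinit/url_helper.py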
echo "Running: " "${cmd[@]}" 1>&2
exec "${cmd[@]}"
diff --git a/tools/run-pep8 b/tools/run-pep8
deleted file mode 100755
index 4bd0bbfb..00000000
--- a/tools/run-pep8
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-pycheck_dirs=( "cloudinit/" "tests/" "tools/" )
-
-CR="
-"
-[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose=""
-
-set -f
-if [ $# -eq 0 ]; then unset IFS
- IFS="$CR"
- files=( "${bin_files[@]}" "${pycheck_dirs[@]}" )
- unset IFS
-else
- files=( "$@" )
-fi
-
-myname=${0##*/}
-cmd=( "${myname#run-}" $verbose "${files[@]}" )
-echo "Running: " "${cmd[@]}" 1>&2
-exec "${cmd[@]}"
diff --git a/tox.ini b/tox.ini
index 27c16ef3..874d3f20 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,8 +12,8 @@ passenv=
[testenv:flake8]
basepython = python3
deps =
- flake8==3.8.2
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
+ flake8==3.9.2
+commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py}
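+# e.g. "tox -e flake8" checks everything listed above, while
+# "tox -e flake8 -- cloudinit/" narrows the run via posargs.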
# https://github.com/gabrielfalcao/HTTPretty/issues/223
setenv =
@@ -23,11 +23,11 @@ setenv =
basepython = python3
deps =
# requirements
- pylint==2.9.3
+ pylint==2.11.1
# test-requirements because unit tests are now present in cloudinit tree
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
-commands = {envpython} -m pylint {posargs:cloudinit tests --ignore=cloud_tests tools}
+commands = {envpython} -m pylint {posargs:cloudinit tests tools}
[testenv:py3]
@@ -77,6 +77,7 @@ deps =
pyserial==3.0.1
configobj==5.0.6
requests==2.9.1
+ jsonschema
# test-requirements
pytest-catchlog==1.2.1
@@ -119,11 +120,11 @@ deps =
pytest==3.0.7
[testenv:tip-flake8]
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/}
+commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py}
deps = flake8
[testenv:tip-pylint]
-commands = {envpython} -m pylint {posargs:cloudinit tests --ignore=cloud_tests tools}
+commands = {envpython} -m pylint {posargs:cloudinit tests tools}
deps =
# requirements
pylint
@@ -131,13 +132,6 @@ deps =
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
-[testenv:citest]
-basepython = python3
-commands = {envpython} -m tests.cloud_tests {posargs}
-passenv = HOME TRAVIS
-deps =
- -r{toxinidir}/cloud-tests-requirements.txt
-
# Until Xenial tox support is dropped or bumps to tox:2.3.2, reflect changes to
# deps into testenv:integration-tests-ci: commands, passenv and deps.
# This is due to (https://github.com/tox-dev/tox/issues/208) which means that
@@ -161,7 +155,7 @@ setenv =
[testenv:integration-tests-jenkins]
commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
-passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_*
+passenv = *_proxy CLOUD_INIT_* SSH_AUTH_SOCK OS_* GOOGLE_* GCP_*
deps =
-r{toxinidir}/integration-requirements.txt
setenv =
diff --git a/udev/10-cloud-init-hook-hotplug.rules b/udev/10-cloud-init-hook-hotplug.rules
deleted file mode 100644
index 2e382679..00000000
--- a/udev/10-cloud-init-hook-hotplug.rules
+++ /dev/null
@@ -1,6 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-# Handle device adds only
-ACTION!="add|remove", GOTO="cloudinit_end"
-LABEL="cloudinit_hook"
-SUBSYSTEM=="net|block", RUN+="/usr/lib/cloud-init/hook-hotplug"
-LABEL="cloudinit_end"