author     Brett Holman <brett.holman@canonical.com>  2022-05-18 11:23:42 -0600
committer  git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>  2022-05-19 03:21:09 +0000
commit     15d691e3b0b32c67b0589665b49e9d2755296d1b (patch)
tree       bece25885ecb96f5b0adcc6941d5f56826e3eb93
parent     0210cd6f20d1c11410ba78409bd235cb4f7a6def (diff)
download   cloud-init-git-15d691e3b0b32c67b0589665b49e9d2755296d1b.tar.gz
22.2-0ubuntu1~22.10.1 (patches unapplied)
Imported using git-ubuntu import.
-rw-r--r--.github/workflows/cla.yml4
-rw-r--r--.readthedocs.yaml6
-rw-r--r--.travis.yml2
-rw-r--r--CONTRIBUTING.rst315
-rw-r--r--ChangeLog196
-rw-r--r--MANIFEST.in1
-rw-r--r--Makefile46
-rw-r--r--WIP-ONGOING-REFACTORIZATION.rst313
-rw-r--r--bash_completion/cloud-init6
-rw-r--r--cloudinit/analyze/show.py4
-rw-r--r--cloudinit/cloud.py14
-rwxr-xr-x[-rw-r--r--]cloudinit/cmd/clean.py4
-rwxr-xr-xcloudinit/cmd/cloud_id.py4
-rwxr-xr-x[-rw-r--r--]cloudinit/cmd/devel/__init__.py3
-rwxr-xr-x[-rw-r--r--]cloudinit/cmd/devel/hotplug_hook.py20
-rwxr-xr-x[-rw-r--r--]cloudinit/cmd/devel/logs.py4
-rwxr-xr-xcloudinit/cmd/devel/make_mime.py7
-rwxr-xr-xcloudinit/cmd/devel/net_convert.py18
-rw-r--r--cloudinit/cmd/devel/parser.py8
-rwxr-xr-xcloudinit/cmd/devel/render.py5
-rwxr-xr-x[-rw-r--r--]cloudinit/cmd/main.py110
-rwxr-xr-x[-rw-r--r--]cloudinit/cmd/query.py4
-rwxr-xr-x[-rw-r--r--]cloudinit/cmd/status.py23
-rw-r--r--cloudinit/config/__init__.py47
-rw-r--r--cloudinit/config/cc_apt_configure.py5
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_byobu.py0
-rw-r--r--cloudinit/config/cc_emit_upstart.py78
-rw-r--r--cloudinit/config/cc_fan.py60
-rw-r--r--cloudinit/config/cc_final_message.py49
-rw-r--r--cloudinit/config/cc_foo.py57
-rw-r--r--cloudinit/config/cc_growpart.py266
-rw-r--r--cloudinit/config/cc_grub_dpkg.py69
-rw-r--r--cloudinit/config/cc_install_hotplug.py51
-rw-r--r--cloudinit/config/cc_keyboard.py69
-rw-r--r--cloudinit/config/cc_keys_to_console.py80
-rw-r--r--cloudinit/config/cc_landscape.py110
-rw-r--r--cloudinit/config/cc_locale.py33
-rw-r--r--cloudinit/config/cc_lxd.py106
-rw-r--r--cloudinit/config/cc_mcollective.py100
-rw-r--r--cloudinit/config/cc_migrator.py42
-rw-r--r--cloudinit/config/cc_mounts.py110
-rw-r--r--cloudinit/config/cc_ntp.py135
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py73
-rw-r--r--cloudinit/config/cc_phone_home.py100
-rw-r--r--cloudinit/config/cc_power_state_change.py105
-rw-r--r--cloudinit/config/cc_puppet.py183
-rw-r--r--cloudinit/config/cc_refresh_rmc_and_interface.py41
-rw-r--r--cloudinit/config/cc_reset_rmc.py38
-rw-r--r--cloudinit/config/cc_resizefs.py33
-rw-r--r--cloudinit/config/cc_resolv_conf.py92
-rw-r--r--cloudinit/config/cc_rh_subscription.py98
-rw-r--r--cloudinit/config/cc_rightscale_userdata.py52
-rw-r--r--cloudinit/config/cc_rsyslog.py240
-rw-r--r--cloudinit/config/cc_runcmd.py77
-rw-r--r--cloudinit/config/cc_salt_minion.py78
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py33
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py32
-rw-r--r--cloudinit/config/cc_scripts_per_once.py36
-rw-r--r--cloudinit/config/cc_scripts_user.py32
-rw-r--r--cloudinit/config/cc_scripts_vendor.py70
-rw-r--r--cloudinit/config/cc_seed_random.py103
-rw-r--r--cloudinit/config/cc_set_hostname.py77
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_set_passwords.py191
-rw-r--r--cloudinit/config/cc_snap.py110
-rw-r--r--cloudinit/config/cc_spacewalk.py44
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_ssh.py173
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_ssh_authkey_fingerprints.py51
-rw-r--r--[-rwxr-xr-x]cloudinit/config/cc_ssh_import_id.py124
-rw-r--r--cloudinit/config/cc_timezone.py41
-rw-r--r--cloudinit/config/cc_ubuntu_advantage.py37
-rw-r--r--cloudinit/config/cc_ubuntu_drivers.py47
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py84
-rw-r--r--cloudinit/config/cc_update_hostname.py86
-rw-r--r--cloudinit/config/cc_users_groups.py249
-rw-r--r--cloudinit/config/cc_write_files.py147
-rw-r--r--cloudinit/config/cc_write_files_deferred.py60
-rw-r--r--cloudinit/config/cc_yum_add_repo.py118
-rw-r--r--cloudinit/config/cc_zypper_add_repo.py79
-rw-r--r--cloudinit/config/cloud-init-schema.json560
-rw-r--r--cloudinit/config/modules.py302
-rw-r--r--cloudinit/config/schema.py292
-rw-r--r--cloudinit/config/schemas/__init__.py0
-rw-r--r--cloudinit/config/schemas/schema-cloud-config-v1.json2273
-rw-r--r--cloudinit/config/schemas/versions.schema.cloud-config.json18
-rw-r--r--[-rwxr-xr-x]cloudinit/distros/__init__.py9
-rw-r--r--cloudinit/distros/bsd.py3
-rw-r--r--cloudinit/distros/freebsd.py10
-rw-r--r--cloudinit/distros/gentoo.py27
-rw-r--r--cloudinit/distros/net_util.py5
-rw-r--r--cloudinit/distros/netbsd.py3
-rw-r--r--cloudinit/distros/networking.py32
-rw-r--r--cloudinit/distros/rhel.py32
-rw-r--r--[-rwxr-xr-x]cloudinit/distros/ug_util.py14
-rw-r--r--cloudinit/gpg.py2
-rw-r--r--cloudinit/handlers/__init__.py1
-rw-r--r--cloudinit/handlers/upstart_job.py107
-rw-r--r--cloudinit/helpers.py3
-rw-r--r--cloudinit/importer.py19
-rw-r--r--cloudinit/net/__init__.py415
-rw-r--r--cloudinit/net/activators.py27
-rw-r--r--[-rwxr-xr-x]cloudinit/net/cmdline.py0
-rw-r--r--cloudinit/net/dhcp.py33
-rw-r--r--cloudinit/net/eni.py2
-rw-r--r--cloudinit/net/netplan.py12
-rw-r--r--cloudinit/net/network_manager.py390
-rw-r--r--cloudinit/net/network_state.py180
-rw-r--r--cloudinit/net/renderer.py4
-rw-r--r--cloudinit/net/renderers.py3
-rw-r--r--cloudinit/net/sysconfig.py94
-rw-r--r--[-rwxr-xr-x]cloudinit/reporting/handlers.py0
-rw-r--r--cloudinit/safeyaml.py128
-rw-r--r--[-rwxr-xr-x]cloudinit/sources/DataSourceAzure.py283
-rw-r--r--cloudinit/sources/DataSourceCloudSigma.py2
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py2
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py8
-rw-r--r--cloudinit/sources/DataSourceEc2.py14
-rw-r--r--cloudinit/sources/DataSourceExoscale.py5
-rw-r--r--cloudinit/sources/DataSourceLXD.py39
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py33
-rw-r--r--cloudinit/sources/DataSourceOracle.py11
-rw-r--r--cloudinit/sources/DataSourceScaleway.py8
-rw-r--r--cloudinit/sources/__init__.py40
-rw-r--r--[-rwxr-xr-x]cloudinit/sources/helpers/azure.py278
-rw-r--r--cloudinit/sources/helpers/cloudsigma.py (renamed from cloudinit/cs_utils.py)0
-rw-r--r--cloudinit/sources/helpers/ec2.py (renamed from cloudinit/ec2_utils.py)0
-rw-r--r--cloudinit/sources/helpers/openstack.py4
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py4
-rw-r--r--cloudinit/sources/helpers/vultr.py69
-rw-r--r--cloudinit/stages.py308
-rw-r--r--cloudinit/subp.py9
-rw-r--r--cloudinit/templater.py47
-rw-r--r--cloudinit/url_helper.py323
-rw-r--r--cloudinit/util.py121
-rw-r--r--cloudinit/version.py2
-rw-r--r--config/cloud.cfg.tmpl27
-rw-r--r--debian/changelog185
-rw-r--r--debian/cloud-init.postinst18
-rw-r--r--debian/control5
-rw-r--r--debian/patches/cpick-30ccd51a-ds-identify-also-discover-LXD-by-presence-from-DMI307
-rw-r--r--debian/patches/cpick-5e347d25-Revert-Ensure-system_cfg-read-before-ds-net-config-on82
-rw-r--r--debian/patches/cpick-be9389c6-Work-around-bug-in-LXD-VM-detection-132531
-rw-r--r--debian/patches/cpick-e3307e4d-ds-identify-detect-LXD-for-VMs-launched-from-host-with86
-rw-r--r--debian/patches/cpick-eee60329-Fix-cloud-init-status-wait-when-no-datasource-found172
-rw-r--r--debian/patches/series5
-rw-r--r--doc-requirements.txt11
-rw-r--r--doc/examples/cloud-config-apt.txt2
-rw-r--r--doc/examples/cloud-config-archive.txt7
-rw-r--r--doc/examples/cloud-config-disk-setup.txt4
-rw-r--r--doc/examples/cloud-config-final-message.txt7
-rw-r--r--doc/examples/cloud-config-growpart.txt31
-rw-r--r--doc/examples/cloud-config-install-packages.txt2
-rw-r--r--doc/examples/cloud-config-landscape.txt23
-rw-r--r--doc/examples/cloud-config-launch-index.txt5
-rw-r--r--doc/examples/cloud-config-mcollective.txt49
-rw-r--r--doc/examples/cloud-config-mount-points.txt2
-rw-r--r--doc/examples/cloud-config-phone-home.txt14
-rw-r--r--doc/examples/cloud-config-power-state.txt40
-rw-r--r--doc/examples/cloud-config-puppet.txt93
-rw-r--r--doc/examples/cloud-config-resolv-conf.txt20
-rw-r--r--doc/examples/cloud-config-rh_subscription.txt49
-rw-r--r--doc/examples/cloud-config-rsyslog.txt47
-rw-r--r--doc/examples/cloud-config-run-cmds.txt2
-rw-r--r--doc/examples/cloud-config-salt-minion.txt53
-rw-r--r--doc/examples/cloud-config-seed-random.txt32
-rw-r--r--doc/examples/cloud-config-update-apt.txt1
-rw-r--r--doc/examples/cloud-config-update-packages.txt2
-rw-r--r--doc/examples/cloud-config-user-groups.txt18
-rw-r--r--doc/examples/cloud-config-vendor-data.txt16
-rw-r--r--doc/examples/cloud-config.txt52
-rw-r--r--doc/examples/part-handler-v2.txt12
-rw-r--r--doc/examples/part-handler.txt12
-rw-r--r--doc/examples/upstart-cloud-config.txt12
-rw-r--r--doc/examples/upstart-rclocal.txt12
-rw-r--r--doc/man/cloud-init.121
-rw-r--r--doc/rtd/conf.py14
-rw-r--r--doc/rtd/index.rst23
-rw-r--r--doc/rtd/topics/analyze.rst2
-rw-r--r--doc/rtd/topics/boot.rst2
-rw-r--r--doc/rtd/topics/cli.rst101
-rw-r--r--doc/rtd/topics/code_review.rst14
-rw-r--r--doc/rtd/topics/datasources.rst60
-rw-r--r--doc/rtd/topics/datasources/azure.rst17
-rw-r--r--doc/rtd/topics/datasources/configdrive.rst50
-rw-r--r--doc/rtd/topics/datasources/ec2.rst28
-rw-r--r--doc/rtd/topics/datasources/lxd.rst24
-rw-r--r--doc/rtd/topics/datasources/maas.rst4
-rw-r--r--doc/rtd/topics/datasources/nocloud.rst35
-rw-r--r--doc/rtd/topics/datasources/rbxcloud.rst10
-rw-r--r--doc/rtd/topics/debugging.rst1
-rw-r--r--doc/rtd/topics/dir_layout.rst6
-rw-r--r--doc/rtd/topics/docs.rst8
-rw-r--r--doc/rtd/topics/examples.rst55
-rw-r--r--doc/rtd/topics/faq.rst8
-rw-r--r--doc/rtd/topics/format.rst136
-rw-r--r--doc/rtd/topics/instancedata.rst1
-rw-r--r--doc/rtd/topics/integration_tests.rst12
-rw-r--r--doc/rtd/topics/logging.rst3
-rw-r--r--doc/rtd/topics/module_creation.rst118
-rw-r--r--doc/rtd/topics/modules.rst8
-rw-r--r--doc/rtd/topics/security.rst5
-rw-r--r--doc/rtd/topics/testing.rst2
-rw-r--r--doc/rtd/topics/tutorial.rst141
-rw-r--r--doc/status.txt6
-rw-r--r--doc/userdata.txt6
-rw-r--r--integration-requirements.txt2
-rw-r--r--packages/debian/control.in1
-rw-r--r--packages/pkg-deps.json3
-rw-r--r--pyproject.toml10
-rw-r--r--[-rwxr-xr-x]setup.py13
-rw-r--r--systemd/cloud-config.service.tmpl4
-rw-r--r--systemd/cloud-config.target19
-rw-r--r--systemd/cloud-final.service.tmpl13
-rw-r--r--systemd/cloud-init-generator.tmpl52
-rw-r--r--systemd/cloud-init-hotplugd.service1
-rw-r--r--systemd/cloud-init-local.service.tmpl23
-rw-r--r--systemd/cloud-init.service.tmpl6
-rw-r--r--systemd/cloud-init.target7
-rw-r--r--templates/hosts.arch.tmpl23
-rw-r--r--test-requirements.txt2
-rw-r--r--tests/integration_tests/bugs/test_gh570.py39
-rw-r--r--tests/integration_tests/bugs/test_gh632.py2
-rw-r--r--tests/integration_tests/clouds.py31
-rw-r--r--tests/integration_tests/cmd/test_status.py69
-rw-r--r--tests/integration_tests/datasources/test_ec2_ipv6.py43
-rw-r--r--tests/integration_tests/datasources/test_lxd_discovery.py74
-rw-r--r--tests/integration_tests/datasources/test_nocloud.py87
-rw-r--r--tests/integration_tests/decorators.py34
-rw-r--r--tests/integration_tests/instances.py2
-rw-r--r--tests/integration_tests/modules/test_cli.py8
-rw-r--r--tests/integration_tests/modules/test_combined.py141
-rw-r--r--tests/integration_tests/modules/test_disk_setup.py7
-rw-r--r--tests/integration_tests/modules/test_frequency_override.py33
-rw-r--r--tests/integration_tests/modules/test_keys_to_console.py32
-rw-r--r--tests/integration_tests/modules/test_persistence.py2
-rw-r--r--tests/integration_tests/modules/test_set_password.py19
-rw-r--r--tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py25
-rw-r--r--tests/integration_tests/modules/test_write_files.py6
-rw-r--r--tests/integration_tests/network/test_net_config_load.py27
-rw-r--r--tests/integration_tests/util.py57
-rw-r--r--tests/unittests/__init__.py12
-rw-r--r--tests/unittests/cmd/devel/test_render.py175
-rw-r--r--tests/unittests/cmd/test_main.py46
-rw-r--r--tests/unittests/cmd/test_query.py96
-rw-r--r--tests/unittests/cmd/test_status.py782
-rw-r--r--tests/unittests/config/test_apt_key.py3
-rw-r--r--tests/unittests/config/test_apt_source_v1.py25
-rw-r--r--tests/unittests/config/test_apt_source_v3.py25
-rw-r--r--tests/unittests/config/test_cc_fan.py33
-rw-r--r--tests/unittests/config/test_cc_growpart.py276
-rw-r--r--tests/unittests/config/test_cc_grub_dpkg.py46
-rw-r--r--tests/unittests/config/test_cc_keyboard.py77
-rw-r--r--tests/unittests/config/test_cc_keys_to_console.py81
-rw-r--r--tests/unittests/config/test_cc_landscape.py34
-rw-r--r--tests/unittests/config/test_cc_locale.py38
-rw-r--r--tests/unittests/config/test_cc_lxd.py31
-rw-r--r--tests/unittests/config/test_cc_mcollective.py32
-rw-r--r--tests/unittests/config/test_cc_mounts.py58
-rw-r--r--tests/unittests/config/test_cc_ntp.py176
-rw-r--r--tests/unittests/config/test_cc_package_update_upgrade_install.py26
-rw-r--r--tests/unittests/config/test_cc_phone_home.py26
-rw-r--r--tests/unittests/config/test_cc_power_state_change.py45
-rw-r--r--tests/unittests/config/test_cc_puppet.py109
-rw-r--r--tests/unittests/config/test_cc_resizefs.py46
-rw-r--r--tests/unittests/config/test_cc_resolv_conf.py75
-rw-r--r--tests/unittests/config/test_cc_rh_subscription.py40
-rw-r--r--tests/unittests/config/test_cc_rsyslog.py108
-rw-r--r--tests/unittests/config/test_cc_runcmd.py97
-rw-r--r--tests/unittests/config/test_cc_scripts_vendor.py28
-rw-r--r--tests/unittests/config/test_cc_seed_random.py43
-rw-r--r--tests/unittests/config/test_cc_set_passwords.py318
-rw-r--r--tests/unittests/config/test_cc_snap.py397
-rw-r--r--tests/unittests/config/test_cc_ssh.py605
-rw-r--r--tests/unittests/config/test_cc_ssh_import_id.py78
-rw-r--r--tests/unittests/config/test_cc_ubuntu_advantage.py101
-rw-r--r--tests/unittests/config/test_cc_ubuntu_drivers.py50
-rw-r--r--tests/unittests/config/test_cc_update_etc_hosts.py30
-rw-r--r--tests/unittests/config/test_cc_users_groups.py72
-rw-r--r--tests/unittests/config/test_cc_write_files.py162
-rw-r--r--tests/unittests/config/test_cc_write_files_deferred.py63
-rw-r--r--tests/unittests/config/test_cc_yum_add_repo.py44
-rw-r--r--tests/unittests/config/test_salt_minion.py33
-rw-r--r--tests/unittests/config/test_schema.py664
-rw-r--r--tests/unittests/distros/test_generic.py10
-rw-r--r--tests/unittests/distros/test_networking.py118
-rw-r--r--tests/unittests/net/test_dhcp.py67
-rw-r--r--tests/unittests/net/test_init.py293
-rw-r--r--tests/unittests/net/test_network_state.py3
-rw-r--r--tests/unittests/runs/test_merge_run.py3
-rw-r--r--tests/unittests/runs/test_simple_run.py11
-rw-r--r--tests/unittests/sources/helpers/test_cloudsigma.py (renamed from tests/unittests/test_cs_util.py)2
-rw-r--r--tests/unittests/sources/helpers/test_ec2.py (renamed from tests/unittests/test_ec2_util.py)20
-rw-r--r--tests/unittests/sources/test_azure.py525
-rw-r--r--tests/unittests/sources/test_azure_helper.py342
-rw-r--r--tests/unittests/sources/test_cloudsigma.py2
-rw-r--r--tests/unittests/sources/test_common.py2
-rw-r--r--tests/unittests/sources/test_configdrive.py19
-rw-r--r--tests/unittests/sources/test_ec2.py126
-rw-r--r--tests/unittests/sources/test_nocloud.py69
-rw-r--r--tests/unittests/sources/test_oracle.py12
-rw-r--r--tests/unittests/sources/test_scaleway.py8
-rw-r--r--tests/unittests/sources/test_vmware.py60
-rw-r--r--tests/unittests/sources/test_vultr.py105
-rw-r--r--tests/unittests/sources/vmware/test_guestcust_util.py15
-rw-r--r--tests/unittests/test_builtin_handlers.py77
-rw-r--r--tests/unittests/test_cli.py32
-rw-r--r--tests/unittests/test_data.py15
-rw-r--r--tests/unittests/test_ds_identify.py93
-rw-r--r--tests/unittests/test_gpg.py9
-rw-r--r--tests/unittests/test_merging.py4
-rw-r--r--tests/unittests/test_net.py1436
-rw-r--r--tests/unittests/test_net_activators.py122
-rw-r--r--tests/unittests/test_net_freebsd.py8
-rw-r--r--tests/unittests/test_render_cloudcfg.py1
-rw-r--r--tests/unittests/test_safeyaml.py60
-rw-r--r--tests/unittests/test_sshutil.py226
-rw-r--r--tests/unittests/test_stages.py430
-rw-r--r--tests/unittests/test_temp_utils.py7
-rw-r--r--tests/unittests/test_templating.py44
-rw-r--r--tests/unittests/test_url_helper.py404
-rw-r--r--tests/unittests/test_util.py274
-rw-r--r--tests/unittests/util.py3
-rw-r--r--tools/.github-cla-signers13
-rw-r--r--tools/.lp-to-git-user3
-rwxr-xr-xtools/benchmark.sh8
-rwxr-xr-xtools/build-on-freebsd1
-rwxr-xr-xtools/build-on-openbsd1
-rwxr-xr-xtools/ds-identify36
-rwxr-xr-xtools/migrate-lp-user-to-github260
-rwxr-xr-xtools/run-centos72
-rwxr-xr-xtools/run-container25
-rw-r--r--tox.ini39
-rw-r--r--upstart/cloud-config.conf9
-rw-r--r--upstart/cloud-final.conf10
-rw-r--r--upstart/cloud-init-blocknet.conf83
-rw-r--r--upstart/cloud-init-container.conf57
-rw-r--r--upstart/cloud-init-local.conf16
-rw-r--r--upstart/cloud-init-nonet.conf66
-rw-r--r--upstart/cloud-init.conf9
-rw-r--r--upstart/cloud-log-shutdown.conf19
339 files changed, 16315 insertions, 10589 deletions
diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
index 8a0b2c07..0500136f 100644
--- a/.github/workflows/cla.yml
+++ b/.github/workflows/cla.yml
@@ -18,8 +18,8 @@ jobs:
In order for us to merge this pull request, you need
to have signed the Contributor License Agreement (CLA).
Please sign the CLA by following our
- hacking guide at:
- https://cloudinit.readthedocs.io/en/latest/topics/hacking.html
+ contribution guide at:
+ https://cloudinit.readthedocs.io/en/latest/topics/contributing.html
Thanks,
Your friendly cloud-init upstream
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 46af8ee7..7769bc55 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -3,6 +3,6 @@ version: 2
formats: all
python:
- install:
- - requirements: doc-requirements.txt
- - path: .
+ install:
+ - path: .
+ - requirements: doc-requirements.txt
diff --git a/.travis.yml b/.travis.yml
index f655fa50..a529ace1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -151,7 +151,7 @@ matrix:
- tox
# Test all supported Python versions (but at the end, so we schedule
# longer-running jobs first)
- - python: "3.10.1"
+ - python: "3.10"
- python: 3.9
- python: 3.8
- python: 3.7
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 73122d79..819572c6 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -136,7 +136,7 @@ Do these things for each feature or bug
* Apply black and isort formatting rules with `tox`_::
- tox -e format
+ tox -e do_format
* Run unit tests and lint/formatting checks with `tox`_::
@@ -245,317 +245,4 @@ Feature Flags
.. automodule:: cloudinit.features
:members:
-
-Ongoing Refactors
-=================
-
-This captures ongoing refactoring projects in the codebase. This is
-intended as documentation for developers involved in the refactoring,
-but also for other developers who may interact with the code being
-refactored in the meantime.
-
-``cloudinit.net`` -> ``cloudinit.distros.networking`` Hierarchy
----------------------------------------------------------------
-
-``cloudinit.net`` was imported from the curtin codebase as a chunk, and
-then modified enough that it integrated with the rest of the cloud-init
-codebase. Over the ~4 years since, the fact that it is not fully
-integrated into the ``Distro`` hierarchy has caused several issues.
-
-The common pattern of these problems is that the commands used for
-networking are different across distributions and operating systems.
-This has led to ``cloudinit.net`` developing its own "distro
-determination" logic: `get_interfaces_by_mac`_ is probably the clearest
-example of this. Currently, these differences are primarily split
-along Linux/BSD lines. However, it would be short-sighted to only
-refactor in a way that captures this difference: we can anticipate that
-differences will develop between Linux-based distros in future, or
-there may already be differences in tooling that we currently
-work around in less obvious ways.
-
-The high-level plan is to introduce a hierarchy of networking classes
-in ``cloudinit.distros.networking``, which each ``Distro`` subclass
-will reference. These will capture the differences between networking
-on our various distros, while still allowing easy reuse of code between
-distros that share functionality (e.g. most of the Linux networking
-behaviour). ``Distro`` objects will instantiate the networking classes
-at ``self.networking``, so callers will call
-``distro.networking.<func>`` instead of ``cloudinit.net.<func>``; this
-will necessitate access to an instantiated ``Distro`` object.
-
-An implementation note: there may be external consumers of the
-``cloudinit.net`` module. We don't consider this a public API, so we
-will be removing it as part of this refactor. However, we will ensure
-that the new API is complete from its introduction, so that any such
-consumers can move over to it wholesale. (Note, however, that this new
-API is still not considered public or stable, and may not replicate the
-existing API exactly.)
-
-In more detail:
-
-* The root of this hierarchy will be the
- ``cloudinit.distros.networking.Networking`` class. This class will
- have a corresponding method for every ``cloudinit.net`` function that
- we identify to be involved in refactoring. Initially, these methods'
- implementations will simply call the corresponding ``cloudinit.net``
- function. (This gives us the complete API from day one, for existing
- consumers.)
-* As the biggest differentiator in behaviour, the next layer of the
- hierarchy will be two subclasses: ``LinuxNetworking`` and
- ``BSDNetworking``. These will be introduced in the initial PR.
-* When a difference in behaviour for a particular distro is identified,
- a new ``Networking`` subclass will be created. This new class should
- generally subclass either ``LinuxNetworking`` or ``BSDNetworking``.
-* To be clear: ``Networking`` subclasses will only be created when
- needed, we will not create a full hierarchy of per-``Distro``
- subclasses up-front.
-* Each ``Distro`` class will have a class variable
- (``cls.networking_cls``) which points at the appropriate
- networking class (initially this will be either ``LinuxNetworking``
- or ``BSDNetworking``).
-* When ``Distro`` classes are instantiated, they will instantiate
- ``cls.networking_cls`` and store the instance at ``self.networking``.
- (This will be implemented in ``cloudinit.distros.Distro.__init__``.)
-* A helper function will be added which will determine the appropriate
- ``Distro`` subclass for the current system, instantiate it and return
- its ``networking`` attribute. (This is the entry point for existing
- consumers to migrate to.)
-* Callers of refactored functions will change from calling
- ``cloudinit.net.<func>`` to ``distro.networking.<func>``, where
- ``distro`` is an instance of the appropriate ``Distro`` class for
- this system. (This will require making such an instance available to
- callers, which will constitute a large part of the work in this
- project.)
-
-After the initial structure is in place, the work in this refactor will
-consist of replacing the ``cloudinit.net.some_func`` call in each
-``cloudinit.distros.networking.Networking`` method with the actual
-implementation. This can be done incrementally, one function at a
-time:
-
-* pick an unmigrated ``cloudinit.distros.networking.Networking`` method
-* find it in the `the list of bugs tagged net-refactor`_ and assign
- yourself to it (see :ref:`Managing Work/Tracking Progress` below for
- more details)
-* refactor all of its callers to call the ``distro.networking.<func>``
- method on ``Distro`` instead of the ``cloudinit.net.<func>``
- function. (This is likely to be the most time-consuming step, as it
- may require plumbing ``Distro`` objects through to places that
- previously have not consumed them.)
-* refactor its implementation from ``cloudinit.net`` into the
- ``Networking`` hierarchy (e.g. if it has an if/else on BSD, this is
- the time to put the implementations in their respective subclasses)
-
- * if the method mixes distro-independent and distro-specific logic,
-   you may need to create new methods to isolate the distro-specific
-   parts; we don't want to replicate common logic in different
-   ``Networking`` subclasses
- * if after the refactor, the method on the root ``Networking`` class
- no longer has any implementation, it should be converted to an
- `abstractmethod`_
-
-* ensure that the new implementation has unit tests (either by moving
- existing tests, or by writing new ones)
-* ensure that the new implementation has a docstring
-* add any appropriate type annotations
-
- * note that we must follow the constraints described in the "Type
- Annotations" section above, so you may not be able to write
- complete annotations
- * we have `type aliases`_ defined in ``cloudinit.distros.networking``
- which should be used when applicable
-
-* finally, remove it (and any other now-unused functions) from
- cloudinit.net (to avoid having two parallel implementations)
-
-``cloudinit.net`` Functions/Classes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The functions/classes that need refactoring break down into some broad
-categories:
-
-* helpers for accessing ``/sys`` (that should not be on the top-level
- ``Networking`` class as they are Linux-specific):
-
- * ``get_sys_class_path``
- * ``sys_dev_path``
- * ``read_sys_net``
- * ``read_sys_net_safe``
- * ``read_sys_net_int``
-
-* those that directly access ``/sys`` (via helpers) and should (IMO) be
- included in the API of the ``Networking`` class:
-
- * ``generate_fallback_config``
-
- * the ``config_driver`` parameter is used and passed as a boolean,
- so we can change the default value to ``False`` (instead of
- ``None``)
-
- * ``get_ib_interface_hwaddr``
- * ``get_interface_mac``
- * ``interface_has_own_mac``
- * ``is_bond``
- * ``is_bridge``
- * ``is_physical``
- * ``is_renamed``
- * ``is_up``
- * ``is_vlan``
- * ``wait_for_physdevs``
-
-* those that directly access ``/sys`` (via helpers) but may be
- Linux-specific concepts or names:
-
- * ``get_master``
- * ``device_devid``
- * ``device_driver``
-
-* those that directly use ``ip``:
-
- * ``_get_current_rename_info``
-
- * this has non-distro-specific logic so should potentially be
- refactored to use helpers on ``self`` instead of ``ip`` directly
- (rather than being wholesale reimplemented in each of
- ``BSDNetworking`` or ``LinuxNetworking``)
- * we can also remove the ``check_downable`` argument, it's never
- specified so is always ``True``
-
- * ``_rename_interfaces``
-
- * this has several internal helper functions which use ``ip``
- directly, and it calls ``_get_current_rename_info``. That said,
- there appears to be a lot of non-distro-specific logic that could
- live in a function on ``Networking``, so this will require some
- careful refactoring to avoid duplicating that logic in each of
- ``BSDNetworking`` and ``LinuxNetworking``.
- * only the ``renames`` and ``current_info`` parameters are ever
- passed in (and ``current_info`` only by tests), so we can remove
- the others from the definition
-
- * ``EphemeralIPv4Network``
-
- * this is another case where it mixes distro-specific and
- non-specific functionality. Specifically, ``__init__``,
- ``__enter__`` and ``__exit__`` are non-specific, and the
- remaining methods are distro-specific.
- * when refactoring this, the need to track ``cleanup_cmds`` likely
- means that the distro-specific behaviour cannot be captured only
- in the ``Networking`` class. See `this comment in PR #363`_ for
- more thoughts.
-
-* those that implicitly use ``/sys`` via their call dependencies:
-
- * ``master_is_bridge_or_bond``
-
- * appends to ``get_master`` return value, which is a ``/sys`` path
-
- * ``extract_physdevs``
-
- * calls ``device_driver`` and ``device_devid`` in both
- ``_version_*`` impls
-
- * ``apply_network_config_names``
-
- * calls ``extract_physdevs``
- * there is already a ``Distro.apply_network_config_names`` which in
- the default implementation calls this function; this and its BSD
- subclass implementations should be refactored at the same time
- * the ``strict_present`` and ``strict_busy`` parameters are never
- passed, nor are they used in the function definition, so they can
- be removed
-
- * ``get_interfaces``
-
- * calls ``device_driver``, ``device_devid`` amongst others
-
- * ``get_ib_hwaddrs_by_interface``
-
- * calls ``get_interfaces``
-
-* those that may fall into the above categories, but whose use is only
- related to netfailover (which relies on a Linux-specific network
- driver, so is unlikely to be relevant elsewhere without a substantial
- refactor; these probably only need implementing in
- ``LinuxNetworking``):
-
- * ``get_dev_features``
-
- * ``has_netfail_standby_feature``
-
- * calls ``get_dev_features``
-
- * ``is_netfailover``
- * ``is_netfail_master``
-
- * this is called from ``generate_fallback_config``
-
- * ``is_netfail_primary``
- * ``is_netfail_standby``
-
- * N.B. all of these take an optional ``driver`` argument which is
- used to pass around a value to avoid having to look it up by
- calling ``device_driver`` every time. This is something of a leaky
- abstraction, and is better served by caching on ``device_driver``
- or storing the cached value on ``self``, so we can drop the
- parameter from the new API.
-
-* those that use ``/sys`` (via helpers) and have non-exhaustive BSD
- logic:
-
- * ``get_devicelist``
-
-* those that already have separate Linux/BSD implementations:
-
- * ``find_fallback_nic``
- * ``get_interfaces_by_mac``
-
-* those that have no OS-specific functionality (so do not need to be
- refactored):
-
- * ``ParserError``
- * ``RendererNotFoundError``
- * ``has_url_connectivity``
- * ``is_ip_address``
- * ``is_ipv4_address``
- * ``natural_sort_key``
-
-Note that the functions in ``cloudinit.net`` use inconsistent parameter
-names for "string that contains a device name"; we can standardise on
-``devname`` (the most common one) in the refactor.
-
-Managing Work/Tracking Progress
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To ensure that we won't have multiple people working on the same part
-of the refactor at the same time, there is a bug for each function.
-You can see the current status by looking at `the list of bugs tagged
-net-refactor`_.
-
-When you're working on refactoring a particular method, ensure that you
-have assigned yourself to the corresponding bug, to avoid duplicate
-work.
-
-Generally, when considering what to pick up to refactor, it is best to
-start with functions in ``cloudinit.net`` which are not called by
-anything else in ``cloudinit.net``. This allows you to focus only on
-refactoring that function and its callsites, rather than having to
-update other ``cloudinit.net`` functions as well.
-
-References
-~~~~~~~~~~
-
-* `Mina Galić's email to the cloud-init ML in 2018`_ (plus its thread)
-* `Mina Galić's email to the cloud-init ML in 2019`_ (plus its thread)
-* `PR #363`_, the discussion which prompted finally starting this
- refactor (and where a lot of the above details were hashed out)
-
.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/main/tools/.github-cla-signers
-.. _get_interfaces_by_mac: https://github.com/canonical/cloud-init/blob/961239749106daead88da483e7319e9268c67cde/cloudinit/net/__init__.py#L810-L818
-.. _Mina Galić's email to the cloud-init ML in 2018: https://lists.launchpad.net/cloud-init/msg00185.html
-.. _Mina Galić's email to the cloud-init ML in 2019: https://lists.launchpad.net/cloud-init/msg00237.html
-.. _PR #363: https://github.com/canonical/cloud-init/pull/363
-.. _this comment in PR #363: https://github.com/canonical/cloud-init/pull/363#issuecomment-628829489
-.. _abstractmethod: https://docs.python.org/3/library/abc.html#abc.abstractmethod
-.. _type aliases: https://docs.python.org/3/library/typing.html#type-aliases
-.. _the list of bugs tagged net-refactor: https://bugs.launchpad.net/cloud-init/+bugs?field.tag=net-refactor
diff --git a/ChangeLog b/ChangeLog
index 676264cd..a90a8986 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,199 @@
+22.2
+ - Fix test due to caplog incompatibility (#1461) [Alberto Contreras]
+ - Align rhel custom files with upstream (#1431)
+ [Emanuele Giuseppe Esposito]
+ - cc_write_files: Improve schema. (#1460) [Alberto Contreras]
+ - cli: Redact files with permission errors in commands (#1440)
+ [Alberto Contreras] (LP: #1953430)
+ - Improve cc_set_passwords. (#1456) [Alberto Contreras]
+ - testing: make fake cloud-init wait actually wait (#1459)
+ - Scaleway: Fix network configuration for netplan 0.102 and later (#1455)
+ [Maxime Corbin]
+ - Fix 'ephmeral' typos in disk names (#1452) [Mike Hucka]
+ - schema: version schema-cloud-config-v1.json (#1424)
+ - cc_modules: set default meta frequency value when no config available
+ (#1457)
+ - Log generic warning on non-systemd systems. (#1450) [Alberto Contreras]
+ - cc_snap.maybe_install_squashfuse no longer needed in Bionic++. (#1448)
+ [Alberto Contreras]
+ - Drop support of *-sk keys in cc_ssh (#1451) [Alberto Contreras]
+ - testing: Fix console_log tests (#1437)
+ - tests: cc_set_passwords update for systemd, non-systemd distros (#1449)
+ - Fix bug in url_helper/dual_stack() logging (#1426)
+ - schema: render schema paths from _CustomSafeLoaderWithMarks (#1391)
+ (GH: SC-929)
+ - testing: Make integration tests kinetic friendly (#1441)
+ - Handle error if SSH service not present. (#1422)
+ [Alberto Contreras] (GH: #1969526)
+ - Fix network-manager activator availability and order (#1438)
+ - sources/azure: remove reprovisioning marker (#1414) [Chris Patterson]
+ - upstart: drop vestigial support for upstart (#1421)
+ - testing: Ensure NoCloud detected in test (#1439)
+ - Update .github-cla-signers kallioli [Kevin Allioli]
+ - Consistently strip top-level network key (#1417) (GH: #1906187)
+ - testing: Fix LXD VM metadata test (#1430)
+ - testing: Add NoCloud setup for NoCloud test (#1425)
+ - Update linters and adapt code for compatibility (#1434) [Paride Legovini]
+ - run-container: add support for LXD VMs (#1428) [Paride Legovini]
+ - integration-reqs: bump pycloudlib pinned commit (#1427) [Paride Legovini]
+ - Fix NoCloud docs (#1423)
+ - Docs fixes (#1406)
+ - docs: Add docs for module creation (#1415)
+ - Remove cheetah from templater (#1416)
+ - tests: verify_ordered_items fallback to re.escape if needed (#1420)
+ - Misc module cleanup (#1418)
+ - docs: Fix doc warnings and enable errors (#1419)
+ [Alberto Contreras] (GH: #1876341)
+ - Refactor cloudinit.sources.NetworkConfigSource to enum (#1413)
+ [Alberto Contreras] (GH: #1874875)
+ - Don't fail if IB and Ethernet devices 'collide' (#1411)
+ - Use cc_* module meta definition over hardcoded vars (SC-888) (#1385)
+ - Fix cc_rsyslog.py initialization (#1404) [Alberto Contreras]
+ - Promote cloud-init schema from devel to top level subcommand (#1402)
+ - mypy: disable missing imports warning for httpretty (#1412)
+ [Chris Patterson]
+ - users: error when home should not be created AND ssh keys provided
+ [Jeffrey 'jf' Lim]
+ - Allow growpart to resize encrypted partitions (#1316)
+ - Fix typo in integration_test.rst (#1405) [Alberto Contreras]
+ - cloudinit.net refactor: apply_network_config_names (#1388)
+ [Alberto Contreras] (GH: #1884602)
+ - tests/azure: add fixtures for hardcoded paths (markers and data_dir)
+ (#1399) [Chris Patterson]
+ - testing: Add responses workaround for focal/impish (#1403)
+ - cc_ssh_import_id: fix is_key_in_nested_dict to avoid early False
+ - Fix ds-identify not detecting NoCloud seed in config (#1381)
+ (GH: #1876375)
+ - sources/azure: retry dhcp for failed processes (#1401) [Chris Patterson]
+ - Move notes about refactorization out of CONTRIBUTING.rst (#1389)
+ - Shave ~8ms off generator runtime (#1387)
+ - Fix provisioning dhcp timeout to 20 minutes (#1394) [Chris Patterson]
+ - schema: module example strict testing fix seed_random
+ - cc_set_hostname: examples small typo (perserve vs preserve)
+ [Wouter Schoot]
+ - sources/azure: refactor http_with_retries to remove **kwargs (#1392)
+ [Chris Patterson]
+ - declare dependency on ssh-import-id (#1334)
+ - drop references to old dependencies and old centos script
+ - sources/azure: only wait for primary nic to be attached during restore
+ (#1378) [Anh Vo]
+ - cc_ntp: migrated legacy schema to cloud-init-schema.json (#1384)
+ (GH: SC-803)
+ - Network functions refactor and bugfixes (#1383)
+ - schema: add JSON defs for modules cc_users_groups (#1379)
+ (GH: SC-928, SC-846, SC-897, #1858930)
+ - Fix doc typo (#1382) [Alberto Contreras]
+ - Add support for dual stack IPv6/IPv4 IMDS to Ec2 (#1160)
+ - Fix KeyError when rendering sysconfig IPv6 routes (#1380) (GH: #1958506)
+ - Return a namedtuple from subp() (#1376)
+ - Mypy stubs and other tox maintenance (SC-920) (#1374)
+ - Distro Compatibility Fixes (#1375)
+ - Pull in Gentoo patches (#1372)
+ - schema: add json defs for modules U-Z (#1360)
+ (GH: #1858928, #1858929, #1858931, #1858932)
+ - util: atomically update sym links to avoid FileNotFoundError
+ when reading status (#1298) [Adam Collard] (GH: LP:1962150)
+ - schema: add json defs for modules scripts-timezone (SC-801) (#1365)
+ - docs: Add first tutorial (SC-900) (#1368)
+ - BUG 1473527: module ssh-authkey-fingerprints fails Input/output error…
+ (#1340) [Andrew Lee] (GH: #1473527)
+ - add arch hosts template (#1371)
+ - ds-identify: detect LXD for VMs launched from host with > 5.10 kernel
+ (#1370) (GH: #1968085)
+ - Support EC2 tags in instance metadata (#1309) [Eduardo Dobay]
+ - schema: add json defs for modules e-install (SC-651) (#1366)
+ - Improve "(no_create_home|system): true" test (#1367) [Jeffrey 'jf' Lim]
+ - Expose https_proxy env variable to ssh-import-id cmd (#1333)
+ [Michael Rommel]
+ - sources/azure: remove bind/unbind logic for hot attached nic (#1332)
+ [Chris Patterson]
+ - tox: add types-* packages to check_format env (#1362)
+ - tests: python 3.10 is showing up in cloudimages (#1364)
+ - testing: add additional mocks to test_net tests (#1356) [yangzz-97]
+ - schema: add JSON schema for mcollective, migrator and mounts modules
+ (#1358)
+ - Honor system locale for RHEL (#1355) [Wei Shi]
+ - doc: Fix typo in cloud-config-run-cmds.txt example (#1359) [Ali Shirvani]
+ - ds-identify: also discover LXD by presence from DMI board_name = LXD
+ (#1311)
+ - black: bump pinned version to 22.3.0 to avoid click dependency issues
+ (#1357)
+ - Various doc fixes (#1330)
+ - testing: Add missing is_FreeBSD mock to networking test (#1353)
+ - Add --no-update to add-apt-repository call (SC-880) (#1337)
+ - schema: add json defs for modules K-L (#1321)
+ (GH: #1858899, #1858900, #1858901, #1858902)
+ - docs: Re-order readthedocs install (#1354)
+ - Stop cc_ssh_authkey_fingerprints from ALWAYS creating home (#1343)
+ [Jeffrey 'jf' Lim]
+ - docs: add jinja2 pin (#1352)
+ - Vultr: Use find_candidate_nics, use ipv6 dns (#1344) [eb3095]
+ - sources/azure: move get_ip_from_lease_value out of shim (#1324)
+ [Chris Patterson]
+ - Fix cloud-init status --wait when no datasource found (#1349)
+ (GH: #1966085)
+ - schema: add JSON defs for modules resize-salt (SC-654) (#1341)
+ - Add myself as a future contributor (#1345) [Neal Gompa (ニール・ゴンパ)]
+ - Update .github-cla-signers (#1342) [Jeffrey 'jf' Lim]
+ - add Requires=cloud-init-hotplugd.socket in cloud-init-hotplugd.service
+ file (#1335) [yangzz-97]
+ - Fix sysconfig render when set-name is missing (#1327)
+ [Andrew Kutz] (GH: #1855945)
+ - Refactoring helper funcs out of NetworkState (#1336) [Andrew Kutz]
+ - url_helper: add tuple support for readurl timeout (#1328)
+ [Chris Patterson]
+ - Make fs labels match for ds-identify and docs (#1329)
+ - Work around bug in LXD VM detection (#1325)
+ - Remove redundant generator logs (#1318)
+ - tox: set verbose flags for integration tests (#1323) [Chris Patterson]
+ - net: introduce find_candidate_nics() (#1313) [Chris Patterson]
+ - Revert "Ensure system_cfg read before ds net config on Oracle (#1174)"
+ (#1326)
+ - Add vendor_data2 support for ConfigDrive source (#1307) [cvstealth]
+ - Make VMWare data source test host independent and expand testing (#1308)
+ [Robert Schweikert]
+ - Add json schemas for modules starting with P
+ - sources/azure: remove lease file parsing (#1302) [Chris Patterson]
+ - remove flaky test from ci (#1322)
+ - ci: Switch to python 3.10 in Travis CI (#1320)
+ - Better interface handling for Vultr, expect unexpected DHCP servers
+ (#1297) [eb3095]
+ - Remove unused init local artifact (#1315)
+ - Doc cleanups (#1317)
+ - docs improvements (#1312)
+ - add support for jinja do statements, add unit test (#1314)
+ [Paul Bruno] (GH: #1962759)
+ - sources/azure: prevent tight loops for DHCP retries (#1285)
+ [Chris Patterson]
+ - net/dhcp: surface type of DHCP lease failure to caller (#1276)
+ [Chris Patterson]
+ - Stop hardcoding systemctl location (#1278) [Robert Schweikert]
+ - Remove python2 syntax from docs (#1310)
+ - [tools/migrate-lp-user-to-github] Rename master branch to main (#1301)
+ [Adam Collard]
+ - redhat: Depend on "hostname" package (#1288) [Lubomir Rintel]
+ - Add native NetworkManager support (#1224) [Lubomir Rintel]
+ - Fix link in CLA check to point to contribution guide. (#1299)
+ [Adam Collard]
+ - check for existing symlink while force creating symlink (#1281)
+ [Shreenidhi Shedi]
+ - Do not silently ignore integer uid (#1280) (GH: #1875772)
+ - tests: create a IPv4/IPv6 VPC in Ec2 integration tests (#1291)
+ - Integration test fix ppa (#1296)
+ - tests: on official EC2. cloud-id actually startswith aws not ec2 (#1289)
+ - test_ppa_source: accept both http and https URLs (#1292)
+ [Paride Legovini]
+ - Fix apt test on azure
+ - add "lkundrak" as contributor [Lubomir Rintel]
+ - Holmanb/integration test fix ppa (#1287)
+ - Include missing subcommand in manpage (#1279)
+ - Clean up artifacts from pytest, packaging, release with make clean
+ (#1277)
+ - sources/azure: ensure retries on IMDS request failure (#1271)
+ [Chris Patterson]
+ - sources/azure: removed unused savable PPS paths (#1268) [Chris Patterson]
+ - integration tests: fix Azure failures (#1269)
+
22.1
- sources/azure: report ready in local phase (#1265) [Chris Patterson]
- sources/azure: validate IMDS network configuration metadata (#1257)
diff --git a/MANIFEST.in b/MANIFEST.in
index 57a85ea7..c2d2b5dd 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -10,7 +10,6 @@ graft templates
graft tests
graft tools
graft udev
-graft upstart
prune build
prune dist
prune .tox
diff --git a/Makefile b/Makefile
index 5f0c5b1b..9584ccc1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,5 @@
CWD=$(shell pwd)
+VARIANT ?= ubuntu
YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f )
YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
@@ -6,12 +7,17 @@ YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
PYTHON = python3
PIP_INSTALL := pip3 install
+NUM_ITER ?= 100
+
ifeq ($(distro),)
distro = redhat
endif
READ_VERSION=$(shell $(PYTHON) $(CWD)/tools/read-version || echo read-version-failed)
CODE_VERSION=$(shell $(PYTHON) -c "from cloudinit import version; print(version.version_string())")
+GENERATOR_F=./systemd/cloud-init-generator
+DS_IDENTIFY=./tools/ds-identify
+BENCHMARK=./tools/benchmark.sh
all: check
@@ -26,6 +32,24 @@ flake8:
unittest: clean_pyc
python3 -m pytest -v tests/unittests cloudinit
+render-template:
+ $(PYTHON) ./tools/render-cloudcfg --variant=$(VARIANT) $(FILE) $(subst .tmpl,,$(FILE))
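+# Example invocation (VARIANT defaults to ubuntu):
+#   make render-template VARIANT=centos FILE=config/cloud.cfg.tmpl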
+
+# from systemd-generator(7) regarding generators:
+# "We do recommend C code however, since generators are executed
+# synchronously and hence delay the entire boot if they are slow."
+#
+# Our generator is a shell script. Make it easy to measure the
+# generator. This should be monitored for performance regressions
+benchmark-generator: FILE=$(GENERATOR_F).tmpl
+benchmark-generator: export ITER=$(NUM_ITER)
+benchmark-generator: render-template
+ $(BENCHMARK) $(GENERATOR_F)
+
+benchmark-ds-identify: export ITER=$(NUM_ITER)
+benchmark-ds-identify:
+ $(BENCHMARK) $(DS_IDENTIFY)
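+# Example invocations (NUM_ITER defaults to 100):
+#   make benchmark-generator NUM_ITER=50
+#   make benchmark-ds-identify NUM_ITER=50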
+
ci-deps-ubuntu:
@$(PYTHON) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro
@@ -44,9 +68,10 @@ test: unittest
check_version:
@if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \
- echo "Error: read-version version '$(READ_VERSION)'" \
- "not equal to code version '$(CODE_VERSION)'"; exit 2; \
- else true; fi
+ echo "Error: read-version version '$(READ_VERSION)'" \
+ "not equal to code version '$(CODE_VERSION)'"; \
+ exit 2; \
+ else true; fi
config/cloud.cfg:
$(PYTHON) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg
@@ -76,7 +101,7 @@ clean_release:
rm -rf new-upstream-changes.txt commit.msg
clean: clean_pyc clean_pytest clean_packaging clean_release
- rm -rf doc/rtd_html .tox .coverage tags
+ rm -rf doc/rtd_html .tox .coverage tags $(GENERATOR_F)
yaml:
@$(PYTHON) $(CWD)/tools/validate-yaml.py $(YAML_FILES)
@@ -90,14 +115,14 @@ srpm:
deb:
@which debuild || \
{ echo "Missing devscripts dependency. Install with:"; \
- echo sudo apt-get install devscripts; exit 1; }
+ echo sudo apt-get install devscripts; exit 1; }
$(PYTHON) ./packages/bddeb
deb-src:
@which debuild || \
{ echo "Missing devscripts dependency. Install with:"; \
- echo sudo apt-get install devscripts; exit 1; }
+ echo sudo apt-get install devscripts; exit 1; }
$(PYTHON) ./packages/bddeb -S -d
doc:
@@ -108,7 +133,8 @@ _CHECK_SPELLING := find doc -type f -exec spellintian {} + | \
grep -v -e 'doc/rtd/topics/cli.rst: modules modules' \
-e 'doc/examples/cloud-config-mcollective.txt: WARNING WARNING' \
-e 'doc/examples/cloud-config-power-state.txt: Bye Bye' \
- -e 'doc/examples/cloud-config.txt: Bye Bye'
+ -e 'doc/examples/cloud-config.txt: Bye Bye' \
+ -e 'doc/rtd/topics/cli.rst: DOCS DOCS'
# For CI we require a failing return code when spellintian finds spelling errors
@@ -140,7 +166,7 @@ fix_spelling:
awk -F ': | -> ' '{printf "sed -i \047s/%s/%s/g\047 %s\n", $$2, $$3, $$1}' | \
sh
-.PHONY: test flake8 clean rpm srpm deb deb-src yaml
+.PHONY: all check test flake8 clean rpm srpm deb deb-src yaml
.PHONY: check_version pip-test-requirements pip-requirements clean_pyc
-.PHONY: unittest style-check doc fix_spelling
-.PHONY: clean_pytest clean_packaging check_spelling clean_release
+.PHONY: unittest style-check fix_spelling render-template benchmark-generator
+.PHONY: clean_pytest clean_packaging check_spelling clean_release doc
diff --git a/WIP-ONGOING-REFACTORIZATION.rst b/WIP-ONGOING-REFACTORIZATION.rst
new file mode 100644
index 00000000..f401af4e
--- /dev/null
+++ b/WIP-ONGOING-REFACTORIZATION.rst
@@ -0,0 +1,313 @@
+Ongoing Refactors
+=================
+
+This captures ongoing refactoring projects in the codebase. This is
+intended as documentation for developers involved in the refactoring,
+but also for other developers who may interact with the code being
+refactored in the meantime.
+
+``cloudinit.net`` -> ``cloudinit.distros.networking`` Hierarchy
+---------------------------------------------------------------
+
+``cloudinit.net`` was imported from the curtin codebase as a chunk, and
+then modified enough that it integrated with the rest of the cloud-init
+codebase. Over the ~4 years since, the fact that it is not fully
+integrated into the ``Distro`` hierarchy has caused several issues.
+
+The common pattern of these problems is that the commands used for
+networking are different across distributions and operating systems.
+This has led to ``cloudinit.net`` developing its own "distro
+determination" logic: `get_interfaces_by_mac`_ is probably the clearest
+example of this. Currently, these differences are primarily split
+along Linux/BSD lines. However, it would be short-sighted to only
+refactor in a way that captures this difference: we can anticipate that
+differences will develop between Linux-based distros in future, or
+there may already be differences in tooling that we currently
+work around in less obvious ways.
+
+The high-level plan is to introduce a hierarchy of networking classes
+in ``cloudinit.distros.networking``, which each ``Distro`` subclass
+will reference. These will capture the differences between networking
+on our various distros, while still allowing easy reuse of code between
+distros that share functionality (e.g. most of the Linux networking
+behaviour). ``Distro`` objects will instantiate the networking classes
+at ``self.networking``, so callers will call
+``distro.networking.<func>`` instead of ``cloudinit.net.<func>``; this
+will necessitate access to an instantiated ``Distro`` object.
+
+An implementation note: there may be external consumers of the
+``cloudinit.net`` module. We don't consider this a public API, so we
+will be removing it as part of this refactor. However, we will ensure
+that the new API is complete from its introduction, so that any such
+consumers can move over to it wholesale. (Note, however, that this new
+API is still not considered public or stable, and may not replicate the
+existing API exactly.)
+
+In more detail (a minimal code sketch follows this list):
+
+* The root of this hierarchy will be the
+ ``cloudinit.distros.networking.Networking`` class. This class will
+ have a corresponding method for every ``cloudinit.net`` function that
+ we identify to be involved in refactoring. Initially, these methods'
+ implementations will simply call the corresponding ``cloudinit.net``
+ function. (This gives us the complete API from day one, for existing
+ consumers.)
+* As the biggest differentiator in behaviour, the next layer of the
+ hierarchy will be two subclasses: ``LinuxNetworking`` and
+ ``BSDNetworking``. These will be introduced in the initial PR.
+* When a difference in behaviour for a particular distro is identified,
+ a new ``Networking`` subclass will be created. This new class should
+ generally subclass either ``LinuxNetworking`` or ``BSDNetworking``.
+* To be clear: ``Networking`` subclasses will only be created when
+ needed, we will not create a full hierarchy of per-``Distro``
+ subclasses up-front.
+* Each ``Distro`` class will have a class variable
+ (``cls.networking_cls``) which points at the appropriate
+ networking class (initially this will be either ``LinuxNetworking``
+ or ``BSDNetworking``).
+* When ``Distro`` classes are instantiated, they will instantiate
+ ``cls.networking_cls`` and store the instance at ``self.networking``.
+ (This will be implemented in ``cloudinit.distros.Distro.__init__``.)
+* A helper function will be added which will determine the appropriate
+ ``Distro`` subclass for the current system, instantiate it and return
+ its ``networking`` attribute. (This is the entry point for existing
+ consumers to migrate to.)
+* Callers of refactored functions will change from calling
+ ``cloudinit.net.<func>`` to ``distro.networking.<func>``, where
+ ``distro`` is an instance of the appropriate ``Distro`` class for
+ this system. (This will require making such an instance available to
+ callers, which will constitute a large part of the work in this
+ project.)
+
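+For illustration only, here is a minimal sketch of this structure,
+using ``get_interface_mac`` (an existing ``cloudinit.net`` function) as
+the delegated example; the class and attribute names follow the plan
+above, while the method details are hypothetical::
+
+    import abc
+
+    from cloudinit import net
+
+
+    class Networking(metaclass=abc.ABCMeta):
+        """Root of the hierarchy; initially delegates to cloudinit.net."""
+
+        def get_interface_mac(self, devname: str):
+            # Day-one passthrough to the existing implementation.
+            return net.get_interface_mac(devname)
+
+
+    class LinuxNetworking(Networking):
+        """Linux-specific behaviour accumulates here."""
+
+
+    class BSDNetworking(Networking):
+        """BSD-specific behaviour accumulates here."""
+
+
+    class Distro:
+        # Each Distro subclass points this at its networking class.
+        networking_cls = LinuxNetworking
+
+        def __init__(self):
+            self.networking = self.networking_cls()
+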
+After the initial structure is in place, the work in this refactor will
+consist of replacing the ``cloudinit.net.some_func`` call in each
+``cloudinit.distros.networking.Networking`` method with the actual
+implementation. This can be done incrementally, one function at a
+time:
+
+* pick an unmigrated ``cloudinit.distros.networking.Networking`` method
+* find it in the `the list of bugs tagged net-refactor`_ and assign
+ yourself to it (see :ref:`Managing Work/Tracking Progress` below for
+ more details)
+* refactor all of its callers to call the ``distro.networking.<func>``
+ method on ``Distro`` instead of the ``cloudinit.net.<func>``
+ function. (This is likely to be the most time-consuming step, as it
+ may require plumbing ``Distro`` objects through to places that
+ previously have not consumed them.)
+* refactor its implementation from ``cloudinit.net`` into the
+ ``Networking`` hierarchy (e.g. if it has an if/else on BSD, this is
+ the time to put the implementations in their respective subclasses)
+
+ * if the method mixes distro-independent and distro-specific logic,
+   you may need to create new methods to isolate the distro-specific
+   parts; we don't want to replicate common logic in different
+   ``Networking`` subclasses
+ * if after the refactor, the method on the root ``Networking`` class
+ no longer has any implementation, it should be converted to an
+   `abstractmethod`_ (a sketch follows this list)
+
+* ensure that the new implementation has unit tests (either by moving
+ existing tests, or by writing new ones)
+* ensure that the new implementation has a docstring
+* add any appropriate type annotations
+
+ * note that we must follow the constraints described in the "Type
+ Annotations" section above, so you may not be able to write
+ complete annotations
+ * we have `type aliases`_ defined in ``cloudinit.distros.networking``
+ which should be used when applicable
+
+* finally, remove it (and any other now-unused functions) from
+ cloudinit.net (to avoid having two parallel implementations)
+
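+As a purely illustrative sketch of the end state for one migrated
+method, assuming ``is_physical`` is the one being moved (the real
+implementations will differ)::
+
+    import abc
+    import os
+
+
+    class Networking(metaclass=abc.ABCMeta):
+        @abc.abstractmethod
+        def is_physical(self, devname: str) -> bool:
+            """Return True if devname is a physical network device."""
+
+
+    class LinuxNetworking(Networking):
+        def is_physical(self, devname: str) -> bool:
+            # On Linux, physical devices expose a device/ directory
+            # under /sys/class/net/<devname>.
+            return os.path.exists("/sys/class/net/%s/device" % devname)
+
+
+    class BSDNetworking(Networking):
+        def is_physical(self, devname: str) -> bool:
+            # Hypothetical: BSD has no sysfs, so this needs its own
+            # check (not shown here).
+            raise NotImplementedError()
+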
+``cloudinit.net`` Functions/Classes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The functions/classes that need refactoring break down into some broad
+categories:
+
+* helpers for accessing ``/sys`` (that should not be on the top-level
+ ``Networking`` class as they are Linux-specific):
+
+ * ``get_sys_class_path``
+ * ``sys_dev_path``
+ * ``read_sys_net``
+ * ``read_sys_net_safe``
+ * ``read_sys_net_int``
+
+* those that directly access ``/sys`` (via helpers) and should (IMO) be
+ included in the API of the ``Networking`` class:
+
+ * ``generate_fallback_config``
+
+ * the ``config_driver`` parameter is used and passed as a boolean,
+ so we can change the default value to ``False`` (instead of
+ ``None``)
+
+ * ``get_ib_interface_hwaddr``
+ * ``get_interface_mac``
+ * ``interface_has_own_mac``
+ * ``is_bond``
+ * ``is_bridge``
+ * ``is_physical``
+ * ``is_renamed``
+ * ``is_up``
+ * ``is_vlan``
+ * ``wait_for_physdevs``
+
+* those that directly access ``/sys`` (via helpers) but may be
+ Linux-specific concepts or names:
+
+ * ``get_master``
+ * ``device_devid``
+ * ``device_driver``
+
+* those that directly use ``ip``:
+
+ * ``_get_current_rename_info``
+
+ * this has non-distro-specific logic so should potentially be
+ refactored to use helpers on ``self`` instead of ``ip`` directly
+ (rather than being wholesale reimplemented in each of
+ ``BSDNetworking`` or ``LinuxNetworking``)
+ * we can also remove the ``check_downable`` argument, it's never
+ specified so is always ``True``
+
+ * ``_rename_interfaces``
+
+ * this has several internal helper functions which use ``ip``
+ directly, and it calls ``_get_current_rename_info``. That said,
+ there appears to be a lot of non-distro-specific logic that could
+ live in a function on ``Networking``, so this will require some
+ careful refactoring to avoid duplicating that logic in each of
+ ``BSDNetworking`` and ``LinuxNetworking``.
+ * only the ``renames`` and ``current_info`` parameters are ever
+ passed in (and ``current_info`` only by tests), so we can remove
+ the others from the definition
+
+ * ``EphemeralIPv4Network``
+
+ * this is another case where it mixes distro-specific and
+ non-specific functionality. Specifically, ``__init__``,
+ ``__enter__`` and ``__exit__`` are non-specific, and the
+ remaining methods are distro-specific.
+ * when refactoring this, the need to track ``cleanup_cmds`` likely
+ means that the distro-specific behaviour cannot be captured only
+ in the ``Networking`` class. See `this comment in PR #363`_ for
+     more thoughts. (A sketch of this split follows this list.)
+
+* those that implicitly use ``/sys`` via their call dependencies:
+
+ * ``master_is_bridge_or_bond``
+
+ * appends to ``get_master`` return value, which is a ``/sys`` path
+
+ * ``extract_physdevs``
+
+ * calls ``device_driver`` and ``device_devid`` in both
+ ``_version_*`` impls
+
+ * ``apply_network_config_names``
+
+ * calls ``extract_physdevs``
+ * there is already a ``Distro.apply_network_config_names`` which in
+ the default implementation calls this function; this and its BSD
+ subclass implementations should be refactored at the same time
+ * the ``strict_present`` and ``strict_busy`` parameters are never
+ passed, nor are they used in the function definition, so they can
+ be removed
+
+ * ``get_interfaces``
+
+ * calls ``device_driver``, ``device_devid`` amongst others
+
+ * ``get_ib_hwaddrs_by_interface``
+
+ * calls ``get_interfaces``
+
+* those that may fall into the above categories, but whose use is only
+ related to netfailover (which relies on a Linux-specific network
+ driver, so is unlikely to be relevant elsewhere without a substantial
+ refactor; these probably only need implementing in
+ ``LinuxNetworking``):
+
+ * ``get_dev_features``
+
+ * ``has_netfail_standby_feature``
+
+ * calls ``get_dev_features``
+
+ * ``is_netfailover``
+ * ``is_netfail_master``
+
+ * this is called from ``generate_fallback_config``
+
+ * ``is_netfail_primary``
+ * ``is_netfail_standby``
+
+ * N.B. all of these take an optional ``driver`` argument, which is
+ used to pass around a value to avoid having to look it up by
+ calling ``device_driver`` every time. This is something of a leaky
+ abstraction, and is better served by caching on ``device_driver``
+ or storing the cached value on ``self``, so we can drop the
+ parameter from the new API (see the caching sketch after this
+ list).
+
+* those that use ``/sys`` (via helpers) and have non-exhaustive BSD
+ logic:
+
+ * ``get_devicelist``
+
+* those that already have separate Linux/BSD implementations:
+
+ * ``find_fallback_nic``
+ * ``get_interfaces_by_mac``
+
+* those that have no OS-specific functionality (so do not need to be
+ refactored):
+
+ * ``ParserError``
+ * ``RendererNotFoundError``
+ * ``has_url_connectivity``
+ * ``is_ip_address``
+ * ``is_ipv4_address``
+ * ``natural_sort_key``
+
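+Two sketches follow to make the proposals above concrete. First, one
+possible shape of the ``EphemeralIPv4Network`` split, keeping the
+distro-agnostic context management (and ``cleanup_cmds`` tracking) in
+an abstract base class; the method names are illustrative, not a final
+design::
+
+    import abc
+
+    from cloudinit import subp
+
+
+    class EphemeralIPv4Network(abc.ABC):
+        """Context management and cleanup tracking are distro-agnostic."""
+
+        def __init__(self, interface, ip, prefix_or_mask, broadcast):
+            self.interface = interface
+            self.ip = ip
+            # ...parameter validation elided...
+            self.cleanup_cmds = []  # populated by distro-specific setup
+
+        def __enter__(self):
+            self._bringup_device()  # appends to self.cleanup_cmds
+
+        def __exit__(self, excp_type, excp_value, excp_traceback):
+            for cmd in self.cleanup_cmds:
+                subp.subp(cmd, capture=True)
+
+        @abc.abstractmethod
+        def _bringup_device(self):
+            """Distro-specific setup, e.g. via ``ip`` on Linux."""
+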
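+Second, a sketch of dropping the ``driver`` parameter from the
+netfailover helpers by caching lookups on ``self``; a dict cache is
+shown here, though memoising ``device_driver`` itself would work too
+(``net.device_driver`` and ``net.has_netfail_standby_feature`` are
+today's module-level helpers)::
+
+    from typing import Dict, Optional
+
+    from cloudinit import net
+
+
+    class LinuxNetworking:  # would subclass the proposed Networking ABC
+        def __init__(self):
+            self._driver_cache: Dict[str, Optional[str]] = {}
+
+        def device_driver(self, devname: str) -> Optional[str]:
+            if devname not in self._driver_cache:
+                self._driver_cache[devname] = net.device_driver(devname)
+            return self._driver_cache[devname]
+
+        def is_netfail_standby(self, devname: str) -> bool:
+            # no ``driver`` parameter: callers rely on the cached lookup
+            if self.device_driver(devname) != "virtio_net":
+                return False
+            return net.has_netfail_standby_feature(devname)
+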
+Note that the functions in ``cloudinit.net`` use inconsistent parameter
+names for "string that contains a device name"; we can standardise on
+``devname`` (the most common one) in the refactor.
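+
+For illustration, a couple of signatures standardised on ``devname``
+(a sketch only; the real API will emerge from the per-function bugs
+tracked below)::
+
+    import abc
+    import os
+
+    from cloudinit.net import read_sys_net_safe
+
+    DeviceName = str  # a type alias keeps signatures self-documenting
+
+
+    class Networking(abc.ABC):
+        """The OS-agnostic API; one subclass per OS family."""
+
+        @abc.abstractmethod
+        def is_up(self, devname: DeviceName) -> bool:
+            ...
+
+        @abc.abstractmethod
+        def is_physical(self, devname: DeviceName) -> bool:
+            ...
+
+
+    class LinuxNetworking(Networking):
+        def is_up(self, devname: DeviceName) -> bool:
+            # /sys is Linux-specific, so the implementation lives here
+            state = read_sys_net_safe(devname, "operstate")
+            return state in ("up", "unknown")
+
+        def is_physical(self, devname: DeviceName) -> bool:
+            return os.path.exists(f"/sys/class/net/{devname}/device")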
+
+Managing Work/Tracking Progress
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To avoid multiple people working on the same part of the refactor at
+the same time, there is a bug for each function. You can see the
+current status by looking at `the list of bugs tagged
+net-refactor`_.
+
+When you're working on refactoring a particular method, ensure that you
+have assigned yourself to the corresponding bug, to avoid duplicate
+work.
+
+Generally, when considering what to pick up to refactor, it is best to
+start with functions in ``cloudinit.net`` which are not called by
+anything else in ``cloudinit.net``. This allows you to focus only on
+refactoring that function and its callsites, rather than also having
+to update other ``cloudinit.net`` functions.
+
+References
+~~~~~~~~~~
+
+* `Mina Galić's email to the cloud-init ML in 2018`_ (plus its thread)
+* `Mina Galić's email to the cloud-init ML in 2019`_ (plus its thread)
+* `PR #363`_, the discussion which finally prompted starting this
+ refactor (and where a lot of the above details were hashed out)
+
+.. _get_interfaces_by_mac: https://github.com/canonical/cloud-init/blob/961239749106daead88da483e7319e9268c67cde/cloudinit/net/__init__.py#L810-L818
+.. _Mina Galić's email to the cloud-init ML in 2018: https://lists.launchpad.net/cloud-init/msg00185.html
+.. _Mina Galić's email to the cloud-init ML in 2019: https://lists.launchpad.net/cloud-init/msg00237.html
+.. _PR #363: https://github.com/canonical/cloud-init/pull/363
+.. _this comment in PR #363: https://github.com/canonical/cloud-init/pull/363#issuecomment-628829489
+.. _abstractmethod: https://docs.python.org/3/library/abc.html#abc.abstractmethod
+.. _type aliases: https://docs.python.org/3/library/typing.html#type-aliases
+.. _the list of bugs tagged net-refactor: https://bugs.launchpad.net/cloud-init/+bugs?field.tag=net-refactor
+
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
index b9f137b1..1eceb472 100644
--- a/bash_completion/cloud-init
+++ b/bash_completion/cloud-init
@@ -45,6 +45,9 @@ _cloudinit_complete()
query)
COMPREPLY=($(compgen -W "--all --help --instance-data --list-keys --user-data --vendor-data --debug" -- $cur_word));;
+ schema)
+ COMPREPLY=($(compgen -W "--help --config-file --docs --annotate --system" -- $cur_word))
+ ;;
single)
COMPREPLY=($(compgen -W "--help --name --frequency --report" -- $cur_word))
;;
@@ -72,9 +75,6 @@ _cloudinit_complete()
;;
render)
COMPREPLY=($(compgen -W "--help --instance-data --debug" -- $cur_word));;
- schema)
- COMPREPLY=($(compgen -W "--help --config-file --doc --annotate" -- $cur_word))
- ;;
show)
COMPREPLY=($(compgen -W "--help --format --infile --outfile" -- $cur_word))
;;
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 5fd9cdfd..abfa0913 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -139,7 +139,7 @@ class SystemctlReader(object):
def __init__(self, property, parameter=None):
self.epoch = None
- self.args = ["/bin/systemctl", "show"]
+ self.args = [subp.which("systemctl"), "show"]
if parameter:
self.args.append(parameter)
self.args.extend(["-p", property])
@@ -188,7 +188,7 @@ class SystemctlReader(object):
def dist_check_timestamp():
"""
Determine which init system a particular linux distro is using.
- Each init system (systemd, upstart, etc) has a different way of
+ Each init system (systemd, etc) has a different way of
providing timestamps.
:return: timestamps of kernelboot, kernelendboot, and cloud-initstart
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 91e48103..cbc5d0db 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -6,9 +6,13 @@
import copy
import os
+from typing import Optional
from cloudinit import log as logging
+from cloudinit.distros import Distro
+from cloudinit.helpers import Paths, Runners
from cloudinit.reporting import events
+from cloudinit.sources import DataSource
LOG = logging.getLogger(__name__)
@@ -25,7 +29,15 @@ LOG = logging.getLogger(__name__)
class Cloud(object):
- def __init__(self, datasource, paths, cfg, distro, runners, reporter=None):
+ def __init__(
+ self,
+ datasource: DataSource,
+ paths: Paths,
+ cfg: dict,
+ distro: Distro,
+ runners: Runners,
+ reporter: Optional[events.ReportEventStack] = None,
+ ):
self.datasource = datasource
self.paths = paths
self.distro = distro
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 0e1db118..1a017608 100644..100755
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
# Copyright (C) 2017 Canonical Ltd.
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -120,5 +122,3 @@ def main():
if __name__ == "__main__":
main()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py
index b9c30fb4..34160f8c 100755
--- a/cloudinit/cmd/cloud_id.py
+++ b/cloudinit/cmd/cloud_id.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
# This file is part of cloud-init. See LICENSE file for license information.
"""Commandline utility to list the canonical cloud-id for an instance."""
@@ -111,5 +113,3 @@ def main():
if __name__ == "__main__":
main()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
index ead5f7a9..9a8f2ebd 100644..100755
--- a/cloudinit/cmd/devel/__init__.py
+++ b/cloudinit/cmd/devel/__init__.py
@@ -6,6 +6,7 @@
import logging
from cloudinit import log
+from cloudinit.helpers import Paths
from cloudinit.stages import Init
@@ -16,7 +17,7 @@ def addLogHandlerCLI(logger, log_level):
return logger
-def read_cfg_paths():
+def read_cfg_paths() -> Paths:
"""Return a Paths object based on the system configuration on disk."""
init = Init(ds_deps=[])
init.read_cfg()
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index a9be0379..29439911 100644..100755
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -1,5 +1,7 @@
+#!/usr/bin/env python3
+
# This file is part of cloud-init. See LICENSE file for license information.
-"""Handle reconfiguration on hotplug events"""
+"""Handle reconfiguration on hotplug events."""
import abc
import argparse
import os
@@ -11,8 +13,7 @@ from cloudinit.event import EventScope, EventType
from cloudinit.net import activators, read_sys_net_safe
from cloudinit.net.network_state import parse_net_config_data
from cloudinit.reporting import events
-from cloudinit.sources import DataSource # noqa: F401
-from cloudinit.sources import DataSourceNotFoundException
+from cloudinit.sources import DataSource, DataSourceNotFoundException
from cloudinit.stages import Init
LOG = log.getLogger(__name__)
@@ -45,24 +46,24 @@ def get_parser(parser=None):
subparsers.required = True
subparsers.add_parser(
- "query", help="query if hotplug is enabled for given subsystem"
+ "query", help="Query if hotplug is enabled for given subsystem."
)
parser_handle = subparsers.add_parser(
- "handle", help="handle the hotplug event"
+ "handle", help="Handle the hotplug event."
)
parser_handle.add_argument(
"-d",
"--devpath",
required=True,
metavar="PATH",
- help="sysfs path to hotplugged device",
+ help="Sysfs path to hotplugged device",
)
parser_handle.add_argument(
"-u",
"--udevaction",
required=True,
- help="action to take",
+ help="Specify action to take.",
choices=["add", "remove"],
)
@@ -72,7 +73,7 @@ def get_parser(parser=None):
class UeventHandler(abc.ABC):
def __init__(self, id, datasource, devpath, action, success_fn):
self.id = id
- self.datasource = datasource # type: DataSource
+ self.datasource: DataSource = datasource
self.devpath = devpath
self.action = action
self.success_fn = success_fn
@@ -208,6 +209,7 @@ def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction):
success_fn=hotplug_init._write_to_cache,
) # type: UeventHandler
wait_times = [1, 3, 5, 10, 30]
+ last_exception = Exception("Bug while processing hotplug event.")
for attempt, wait in enumerate(wait_times):
LOG.debug(
"subsystem=%s update attempt %s/%s",
@@ -230,7 +232,7 @@ def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction):
time.sleep(wait)
last_exception = e
else:
- raise last_exception # type: ignore
+ raise last_exception
def handle_args(name, args):
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index d54b809a..fbe8c500 100644..100755
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
# Copyright (C) 2017 Canonical Ltd.
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -189,5 +191,3 @@ def main():
if __name__ == "__main__":
sys.exit(main())
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py
index c7671a93..421e88fd 100755
--- a/cloudinit/cmd/devel/make_mime.py
+++ b/cloudinit/cmd/devel/make_mime.py
@@ -1,6 +1,8 @@
+#!/usr/bin/env python3
+
# This file is part of cloud-init. See LICENSE file for license information.
-"""Generate multi-part mime messages for user-data """
+"""Generate multi-part mime messages for user-data."""
import argparse
import sys
@@ -138,6 +140,3 @@ def main():
if __name__ == "__main__":
sys.exit(main())
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 18b1e7ff..e3f58e90 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
# This file is part of cloud-init. See LICENSE file for license information.
"""Debug network config format conversions."""
@@ -7,7 +8,14 @@ import os
import sys
from cloudinit import distros, log, safeyaml
-from cloudinit.net import eni, netplan, network_state, networkd, sysconfig
+from cloudinit.net import (
+ eni,
+ netplan,
+ network_manager,
+ network_state,
+ networkd,
+ sysconfig,
+)
from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources import DataSourceOVF as ovf
from cloudinit.sources.helpers import openstack
@@ -74,7 +82,7 @@ def get_parser(parser=None):
parser.add_argument(
"-O",
"--output-kind",
- choices=["eni", "netplan", "networkd", "sysconfig"],
+ choices=["eni", "netplan", "networkd", "sysconfig", "network-manager"],
required=True,
help="The network config format to emit",
)
@@ -148,6 +156,9 @@ def handle_args(name, args):
elif args.output_kind == "sysconfig":
r_cls = sysconfig.Renderer
config = distro.renderer_configs.get("sysconfig")
+ elif args.output_kind == "network-manager":
+ r_cls = network_manager.Renderer
+ config = distro.renderer_configs.get("network-manager")
else:
raise RuntimeError("Invalid output_kind")
@@ -169,6 +180,3 @@ def handle_args(name, args):
if __name__ == "__main__":
args = get_parser().parse_args()
handle_args(NAME, args)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 76b16c2e..460b94b3 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -6,8 +6,6 @@
import argparse
-from cloudinit.config import schema
-
from . import hotplug_hook, make_mime, net_convert, render
@@ -28,12 +26,6 @@ def get_parser(parser=None):
hotplug_hook.handle_args,
),
(
- "schema",
- "Validate cloud-config files for document schema",
- schema.get_parser,
- schema.handle_schema_args,
- ),
- (
net_convert.NAME,
net_convert.__doc__,
net_convert.get_parser,
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 2f9a22a8..62b432d2 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
# This file is part of cloud-init. See LICENSE file for license information.
"""Debug jinja template rendering of user-data."""
@@ -110,6 +112,3 @@ def main():
if __name__ == "__main__":
sys.exit(main())
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index c9be41b3..fcdaf725 100644..100755
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -1,4 +1,5 @@
-#
+#!/usr/bin/env python3
+
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
@@ -11,6 +12,9 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+# Skip isort on this file because of the patch that comes between imports
+# isort: skip_file
+
import argparse
import json
import os
@@ -19,6 +23,7 @@ import time
import traceback
from cloudinit import patcher
+from cloudinit.config.modules import Modules
patcher.patch_logging()
@@ -105,7 +110,7 @@ def extract_fns(args):
return fn_cfgs
-def run_module_section(mods, action_name, section):
+def run_module_section(mods: Modules, action_name, section):
full_section_name = MOD_SECTION_TPL % (section)
(which_ran, failures) = mods.run_section(full_section_name)
total_attempted = len(which_ran) + len(failures)
@@ -337,31 +342,6 @@ def main_init(name, args):
if mode == sources.DSMODE_NETWORK:
existing = "trust"
sys.stderr.write("%s\n" % (netinfo.debug_info()))
- LOG.debug(
- "Checking to see if files that we need already"
- " exist from a previous run that would allow us"
- " to stop early."
- )
- # no-net is written by upstart cloud-init-nonet when network failed
- # to come up
- stop_files = [
- os.path.join(path_helper.get_cpath("data"), "no-net"),
- ]
- existing_files = []
- for fn in stop_files:
- if os.path.isfile(fn):
- existing_files.append(fn)
-
- if existing_files:
- LOG.debug(
- "[%s] Exiting. stop file %s existed", mode, existing_files
- )
- return (None, [])
- else:
- LOG.debug(
- "Execution continuing, no previous run detected that"
- " would allow us to stop early."
- )
else:
existing = "check"
mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False)
@@ -375,8 +355,6 @@ def main_init(name, args):
existing = "trust"
init.purge_cache()
- # Delete the no-net file as well
- util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
# Stage 5
bring_up_interfaces = _should_bring_up_interfaces(init, args)
@@ -394,8 +372,7 @@ def main_init(name, args):
except sources.DataSourceNotFoundException:
# In the case of 'cloud-init init' without '--local' it is a bit
# more likely that the user would consider it failure if nothing was
- # found. When using upstart it will also mentions job failure
- # in console log if exit code is != 0.
+ # found.
if mode == sources.DSMODE_LOCAL:
LOG.debug("No local datasource found")
else:
@@ -484,7 +461,7 @@ def main_init(name, args):
apply_reporting_cfg(init.cfg)
# Stage 8 - re-read and apply relevant cloud-config to include user-data
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
+ mods = Modules(init, extract_fns(args), reporter=args.reporter)
# Stage 9
try:
outfmt_orig = outfmt
@@ -587,7 +564,7 @@ def main_modules(action_name, args):
return [(msg)]
_maybe_persist_instance_data(init)
# Stage 3
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
+ mods = Modules(init, extract_fns(args), reporter=args.reporter)
# Stage 4
try:
LOG.debug("Closing stdin")
@@ -642,7 +619,7 @@ def main_single(name, args):
return 1
_maybe_persist_instance_data(init)
# Stage 3
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
+ mods = Modules(init, extract_fns(args), reporter=args.reporter)
mod_args = args.module_args
if mod_args:
LOG.debug("Using passed in arguments %s", mod_args)
@@ -836,8 +813,7 @@ def main_features(name, args):
def main(sysv_args=None):
if not sysv_args:
sysv_args = sys.argv
- parser = argparse.ArgumentParser(prog=sysv_args[0])
- sysv_args = sysv_args[1:]
+ parser = argparse.ArgumentParser(prog=sysv_args.pop(0))
# Top level args
parser.add_argument(
@@ -845,28 +821,29 @@ def main(sysv_args=None):
"-v",
action="version",
version="%(prog)s " + (version.version_string()),
+ help="Show program's version number and exit.",
)
parser.add_argument(
"--file",
"-f",
action="append",
dest="files",
- help="additional yaml configuration files to use",
+ help="Use additional yaml configuration files.",
type=argparse.FileType("rb"),
)
parser.add_argument(
"--debug",
"-d",
action="store_true",
- help="show additional pre-action logging (default: %(default)s)",
+ help="Show additional pre-action logging (default: %(default)s).",
default=False,
)
parser.add_argument(
"--force",
action="store_true",
help=(
- "force running even if no datasource is"
- " found (use at your own risk)"
+ "Force running even if no datasource is"
+ " found (use at your own risk)."
),
dest="force",
default=False,
@@ -878,13 +855,13 @@ def main(sysv_args=None):
# Each action and its sub-options (if any)
parser_init = subparsers.add_parser(
- "init", help="initializes cloud-init and performs initial modules"
+ "init", help="Initialize cloud-init and perform initial modules."
)
parser_init.add_argument(
"--local",
"-l",
action="store_true",
- help="start in local mode (default: %(default)s)",
+ help="Start in local mode (default: %(default)s).",
default=False,
)
# This is used so that we can know which action is selected +
@@ -893,13 +870,13 @@ def main(sysv_args=None):
# These settings are used for the 'config' and 'final' stages
parser_mod = subparsers.add_parser(
- "modules", help="activates modules using a given configuration key"
+ "modules", help="Activate modules using a given configuration key."
)
parser_mod.add_argument(
"--mode",
"-m",
action="store",
- help="module configuration name to use (default: %(default)s)",
+ help="Module configuration name to use (default: %(default)s).",
default="config",
choices=("init", "config", "final"),
)
@@ -907,7 +884,7 @@ def main(sysv_args=None):
# This subcommand allows you to run a single module
parser_single = subparsers.add_parser(
- "single", help="run a single module "
+ "single", help="Run a single module."
)
parser_single.add_argument(
"--name",
@@ -919,21 +896,21 @@ def main(sysv_args=None):
parser_single.add_argument(
"--frequency",
action="store",
- help="frequency of the module",
+ help="Set module frequency.",
required=False,
choices=list(FREQ_SHORT_NAMES.keys()),
)
parser_single.add_argument(
"--report",
action="store_true",
- help="enable reporting",
+ help="Enable reporting.",
required=False,
)
parser_single.add_argument(
"module_args",
nargs="*",
metavar="argument",
- help="any additional arguments to pass to this module",
+ help="Any additional arguments to pass to this module.",
)
parser_single.set_defaults(action=("single", main_single))
@@ -948,18 +925,20 @@ def main(sysv_args=None):
dhclient_hook.get_parser(parser_dhclient)
parser_features = subparsers.add_parser(
- "features", help="list defined features"
+ "features", help="List defined features."
)
parser_features.set_defaults(action=("features", main_features))
parser_analyze = subparsers.add_parser(
- "analyze", help="Devel tool: Analyze cloud-init logs and data"
+ "analyze", help="Devel tool: Analyze cloud-init logs and data."
)
- parser_devel = subparsers.add_parser("devel", help="Run development tools")
+ parser_devel = subparsers.add_parser(
+ "devel", help="Run development tools."
+ )
parser_collect_logs = subparsers.add_parser(
- "collect-logs", help="Collect and tar all cloud-init debug info"
+ "collect-logs", help="Collect and tar all cloud-init debug info."
)
parser_clean = subparsers.add_parser(
@@ -970,19 +949,24 @@ def main(sysv_args=None):
"status", help="Report cloud-init status or wait on completion."
)
+ parser_schema = subparsers.add_parser(
+ "schema", help="Validate cloud-config files using jsonschema."
+ )
+
if sysv_args:
# Only load subparsers if subcommand is specified to avoid load cost
- if sysv_args[0] == "analyze":
+ subcommand = sysv_args[0]
+ if subcommand == "analyze":
from cloudinit.analyze.__main__ import get_parser as analyze_parser
# Construct analyze subcommand parser
analyze_parser(parser_analyze)
- elif sysv_args[0] == "devel":
+ elif subcommand == "devel":
from cloudinit.cmd.devel.parser import get_parser as devel_parser
# Construct devel subcommand parser
devel_parser(parser_devel)
- elif sysv_args[0] == "collect-logs":
+ elif subcommand == "collect-logs":
from cloudinit.cmd.devel.logs import (
get_parser as logs_parser,
handle_collect_logs_args,
@@ -992,7 +976,7 @@ def main(sysv_args=None):
parser_collect_logs.set_defaults(
action=("collect-logs", handle_collect_logs_args)
)
- elif sysv_args[0] == "clean":
+ elif subcommand == "clean":
from cloudinit.cmd.clean import (
get_parser as clean_parser,
handle_clean_args,
@@ -1000,7 +984,7 @@ def main(sysv_args=None):
clean_parser(parser_clean)
parser_clean.set_defaults(action=("clean", handle_clean_args))
- elif sysv_args[0] == "query":
+ elif subcommand == "query":
from cloudinit.cmd.query import (
get_parser as query_parser,
handle_args as handle_query_args,
@@ -1008,7 +992,15 @@ def main(sysv_args=None):
query_parser(parser_query)
parser_query.set_defaults(action=("render", handle_query_args))
- elif sysv_args[0] == "status":
+ elif subcommand == "schema":
+ from cloudinit.config.schema import (
+ get_parser as schema_parser,
+ handle_schema_args,
+ )
+
+ schema_parser(parser_schema)
+ parser_schema.set_defaults(action=("schema", handle_schema_args))
+ elif subcommand == "status":
from cloudinit.cmd.status import (
get_parser as status_parser,
handle_status_args,
@@ -1081,5 +1073,3 @@ if __name__ == "__main__":
return_value = main(sys.argv)
if return_value:
sys.exit(return_value)
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index 46f17699..b9347200 100644..100755
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
# This file is part of cloud-init. See LICENSE file for license information.
"""Query standardized instance metadata provided to machine, returning a JSON
@@ -317,5 +319,3 @@ def main():
if __name__ == "__main__":
main()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
index 5176549d..1c7c209b 100644..100755
--- a/cloudinit/cmd/status.py
+++ b/cloudinit/cmd/status.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
# Copyright (C) 2017 Canonical Ltd.
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -9,9 +11,10 @@ import enum
import os
import sys
from time import gmtime, sleep, strftime
+from typing import Tuple
+from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.distros import uses_systemd
-from cloudinit.stages import Init
from cloudinit.util import get_cmdline, load_file, load_json
CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled"
@@ -62,17 +65,16 @@ def get_parser(parser=None):
return parser
-def handle_status_args(name, args):
+def handle_status_args(name, args) -> int:
"""Handle calls to 'cloud-init status' as a subcommand."""
# Read configured paths
- init = Init(ds_deps=[])
- init.read_cfg()
- status, status_detail, time = get_status_details(init.paths)
+ paths = read_cfg_paths()
+ status, status_detail, time = get_status_details(paths)
if args.wait:
while status in (UXAppStatus.NOT_RUN, UXAppStatus.RUNNING):
sys.stdout.write(".")
sys.stdout.flush()
- status, status_detail, time = get_status_details(init.paths)
+ status, status_detail, time = get_status_details(paths)
sleep(0.25)
sys.stdout.write("\n")
print("status: {0}".format(status.value))
@@ -113,17 +115,14 @@ def _is_cloudinit_disabled(disable_file, paths):
return (is_disabled, reason)
-def get_status_details(paths=None):
+def get_status_details(paths=None) -> Tuple[UXAppStatus, str, str]:
"""Return a 3-tuple of status, status_details and time of last event.
@param paths: An initialized cloudinit.helpers.paths object.
Values are obtained from parsing paths.run_dir/status.json.
"""
- if not paths:
- init = Init(ds_deps=[])
- init.read_cfg()
- paths = init.paths
+ paths = paths or read_cfg_paths()
status = UXAppStatus.NOT_RUN
status_detail = ""
@@ -180,5 +179,3 @@ def main():
if __name__ == "__main__":
main()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index ed124180..e69de29b 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -1,47 +0,0 @@
-# Copyright (C) 2008-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Chuck Short <chuck.short@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import log as logging
-from cloudinit.settings import FREQUENCIES, PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-# This prefix is used to make it less
-# of a chance that when importing
-# we will not find something else with the same
-# name in the lookup path...
-MOD_PREFIX = "cc_"
-
-
-def form_module_name(name):
- canon_name = name.replace("-", "_")
- if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0 : (len(canon_name) - 3)]
- canon_name = canon_name.strip()
- if not canon_name:
- return None
- if not canon_name.startswith(MOD_PREFIX):
- canon_name = "%s%s" % (MOD_PREFIX, canon_name)
- return canon_name
-
-
-def fixup_module(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, "frequency"):
- setattr(mod, "frequency", def_freq)
- else:
- freq = mod.frequency
- if freq and freq not in FREQUENCIES:
- LOG.warning("Module %s has an unknown frequency %s", mod, freq)
- if not hasattr(mod, "distros"):
- setattr(mod, "distros", [])
- if not hasattr(mod, "osfamilies"):
- setattr(mod, "osfamilies", [])
- return mod
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index c558311a..7ca50194 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -571,7 +571,10 @@ def add_apt_sources(
if aa_repo_match(source):
try:
- subp.subp(["add-apt-repository", source], target=target)
+ subp.subp(
+ ["add-apt-repository", "--no-update", source],
+ target=target,
+ )
except subp.ProcessExecutionError:
LOG.exception("add-apt-repository failed.")
raise
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index fbc20410..fbc20410 100755..100644
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
deleted file mode 100644
index a928082b..00000000
--- a/cloudinit/config/cc_emit_upstart.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Emit Upstart
-------------
-**Summary:** emit upstart configuration
-
-Emit upstart configuration for cloud-init modules on upstart based systems. No
-user configuration should be required.
-
-**Internal name:** ``cc_emit_upstart``
-
-**Module frequency:** always
-
-**Supported distros:** ubuntu, debian
-"""
-
-import os
-
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-distros = ["ubuntu", "debian"]
-LOG = logging.getLogger(__name__)
-
-
-def is_upstart_system():
- if not os.path.isfile("/sbin/initctl"):
- LOG.debug("no /sbin/initctl located")
- return False
-
- myenv = os.environ.copy()
- if "UPSTART_SESSION" in myenv:
- del myenv["UPSTART_SESSION"]
- check_cmd = ["initctl", "version"]
- try:
- (out, _err) = subp.subp(check_cmd, env=myenv)
- return "upstart" in out
- except subp.ProcessExecutionError as e:
- LOG.debug(
- "'%s' returned '%s', not using upstart",
- " ".join(check_cmd),
- e.exit_code,
- )
- return False
-
-
-def handle(name, _cfg, cloud, log, args):
- event_names = args
- if not event_names:
- # Default to the 'cloud-config'
- # event for backwards compat.
- event_names = ["cloud-config"]
-
- if not is_upstart_system():
- log.debug("not upstart system, '%s' disabled", name)
- return
-
- cfgpath = cloud.paths.get_ipath_cur("cloud_config")
- for n in event_names:
- cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath]
- try:
- subp.subp(cmd)
- except Exception as e:
- # TODO(harlowja), use log exception from utils??
- log.warning("Emission of upstart event %s failed due to: %s", n, e)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 50a81744..57c762a1 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -3,12 +3,16 @@
# Author: Scott Moser <scott.moser@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Fan: Configure ubuntu fan networking"""
-"""
-Fan
----
-**Summary:** configure ubuntu fan networking
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module installs, configures and starts the ubuntu fan network system. For
more information about Ubuntu Fan, see:
``https://wiki.ubuntu.com/FanNetworking``.
@@ -19,31 +23,37 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
- install the package ``ubuntu-fan`` if it is not installed
- ensure the service is started (or restarted if was previously running)
-**Internal name:** ``cc_fan``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- fan:
- config: |
- # fan 240
- 10.0.0.0/8 eth0/16 dhcp
- 10.0.0.0/8 eth1/16 dhcp off
- # fan 241
- 241.0.0.0/8 eth0/16 dhcp
- config_path: /etc/network/fan
+Additionally, the ``ubuntu-fan`` package will be automatically installed
+if not present.
"""
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.settings import PER_INSTANCE
+distros = ["ubuntu"]
+meta: MetaSchema = {
+ "id": "cc_fan",
+ "name": "Fan",
+ "title": "Configure ubuntu fan networking",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ fan:
+ config: |
+ # fan 240
+ 10.0.0.0/8 eth0/16 dhcp
+ 10.0.0.0/8 eth1/16 dhcp off
+ # fan 241
+ 241.0.0.0/8 eth0/16 dhcp
+ config_path: /etc/network/fan
+ """
+ )
+ ],
+}
-LOG = logging.getLogger(__name__)
+__doc__ = get_meta_doc(meta)
-frequency = PER_INSTANCE
+LOG = logging.getLogger(__name__)
BUILTIN_CFG = {
"config": None,
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index f443ccd8..89be520e 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -5,12 +5,16 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Final Message: Output final message when cloud-init has finished"""
-"""
-Final Message
--------------
-**Summary:** output final message when cloud-init has finished
+from textwrap import dedent
+
+from cloudinit import templater, util, version
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
This module configures the final message that cloud-init writes. The message is
specified as a jinja template with the following variables set:
@@ -19,22 +23,31 @@ specified as a jinja template with the following variables set:
- ``datasource``: cloud-init data source
- ``uptime``: system uptime
-**Internal name:** ``cc_final_message``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- final_message: <message>
-
+Upon exit, this module writes ``/var/lib/cloud/instance/boot-finished``.
"""
-
-from cloudinit import templater, util, version
-from cloudinit.settings import PER_ALWAYS
-
frequency = PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_final_message",
+ "name": "Final Message",
+ "title": "Output final message when cloud-init has finished",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [
+ dedent(
+ """\
+ final_message: |
+ cloud-init has finished
+ version: $version
+ timestamp: $timestamp
+ datasource: $datasource
+ uptime: $uptime
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
# Jinja formated default message
FINAL_MESSAGE_DEF = (
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
deleted file mode 100644
index 3c307153..00000000
--- a/cloudinit/config/cc_foo.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Foo
----
-**Summary:** example module
-
-Example to show module structure. Does not do anything.
-
-**Internal name:** ``cc_foo``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-"""
-
-from cloudinit.settings import PER_INSTANCE
-
-# Modules are expected to have the following attributes.
-# 1. A required 'handle' method which takes the following params.
-# a) The name will not be this files name, but instead
-# the name specified in configuration (which is the name
-# which will be used to find this module).
-# b) A configuration object that is the result of the merging
-# of cloud configs configuration with legacy configuration
-# as well as any datasource provided configuration
-# c) A cloud object that can be used to access various
-# datasource and paths for the given distro and data provided
-# by the various datasource instance types.
-# d) A argument list that may or may not be empty to this module.
-# Typically those are from module configuration where the module
-# is defined with some extra configuration that will eventually
-# be translated from yaml into arguments to this module.
-# 2. A optional 'frequency' that defines how often this module should be run.
-# Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
-# provided PER_INSTANCE will be assumed.
-# See settings.py for these constants.
-# 3. A optional 'distros' array/set/tuple that defines the known distros
-# this module will work with (if not all of them). This is used to write
-# a warning out if a module is being ran on a untested distribution for
-# informational purposes. If non existent all distros are assumed and
-# no warning occurs.
-
-frequency = PER_INSTANCE
-
-
-def handle(name, _cfg, _cloud, log, _args):
- log.debug("Hi from module %s", name)
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 43334caa..14a2c0b8 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -5,29 +5,34 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Growpart: Grow partitions"""
-"""
-Growpart
---------
-**Summary:** grow partitions
+import base64
+import copy
+import json
+import os
+import os.path
+import re
+import stat
+from contextlib import suppress
+from pathlib import Path
+from textwrap import dedent
+from typing import Tuple
+
+from cloudinit import log as logging
+from cloudinit import subp, temp_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
Growpart resizes partitions to fill the available disk space.
This is useful for cloud instances with a larger amount of disk space available
than the pristine image uses, as it allows the instance to automatically make
use of the extra space.
The devices on which to run growpart are specified as a list under the
-``devices`` key. Each entry in the devices list can be either the path to the
-device's mountpoint in the filesystem or a path to the block device in
-``/dev``.
-
-The utility to use for resizing can be selected using the ``mode`` config key.
-If the ``mode`` key is set to ``auto``, then any available utility (either
-``growpart`` or BSD ``gpart``) will be used. If neither utility is available,
-no error will be raised. If ``mode`` is set to ``growpart``, then the
-``growpart`` utility will be used. If this utility is not available on the
-system, this will result in an error. If ``mode`` is set to ``off`` or
-``false``, then ``cc_growpart`` will take no action.
+``devices`` key.
There is some functionality overlap between this module and the ``growroot``
functionality of ``cloud-initramfs-tools``. However, there are some situations
@@ -44,36 +49,41 @@ Growpart is enabled by default on the root partition. The default config for
growpart is::
growpart:
- mode: auto
- devices: ["/"]
- ignore_growroot_disabled: false
-
-**Internal name:** ``cc_growpart``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- growpart:
- mode: <auto/growpart/off/false>
- devices:
- - "/"
- - "/dev/vdb1"
- ignore_growroot_disabled: <true/false>
+ mode: auto
+ devices: ["/"]
+ ignore_growroot_disabled: false
"""
-
-import os
-import os.path
-import re
-import stat
-
-from cloudinit import log as logging
-from cloudinit import subp, temp_utils, util
-from cloudinit.settings import PER_ALWAYS
-
frequency = PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_growpart",
+ "name": "Growpart",
+ "title": "Grow partitions",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [
+ dedent(
+ """\
+ growpart:
+ mode: auto
+ devices: ["/"]
+ ignore_growroot_disabled: false
+ """
+ ),
+ dedent(
+ """\
+ growpart:
+ mode: growpart
+ devices:
+ - "/"
+ - "/dev/vdb1"
+ ignore_growroot_disabled: true
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
DEFAULT_CONFIG = {
"mode": "auto",
@@ -81,6 +91,8 @@ DEFAULT_CONFIG = {
"ignore_growroot_disabled": False,
}
+KEYDATA_PATH = Path("/cc_growpart_keydata")
+
class RESIZE(object):
SKIPPED = "SKIPPED"
@@ -289,10 +301,128 @@ def devent2dev(devent):
return dev
+def get_mapped_device(blockdev):
+ """Returns underlying block device for a mapped device.
+
+ If it is mapped, blockdev will usually take the form of
+ /dev/mapper/some_name
+
+ If blockdev is a symlink pointing to a /dev/dm-* device, return
+ the device pointed to. Otherwise, return None.
+ """
+ realpath = os.path.realpath(blockdev)
+ if realpath.startswith("/dev/dm-"):
+ LOG.debug("%s is a mapped device pointing to %s", blockdev, realpath)
+ return realpath
+ return None
+
+
+def is_encrypted(blockdev, partition) -> bool:
+ """
+ Check if a device is an encrypted device. blockdev should have
+ a /dev/dm-* path whereas partition is something like /dev/sda1.
+ """
+ if not subp.which("cryptsetup"):
+ LOG.debug("cryptsetup not found. Assuming no encrypted partitions")
+ return False
+ try:
+ subp.subp(["cryptsetup", "status", blockdev])
+ except subp.ProcessExecutionError as e:
+ if e.exit_code == 4:
+ LOG.debug("Determined that %s is not encrypted", blockdev)
+ else:
+ LOG.warning(
+ "Received unexpected exit code %s from "
+ "cryptsetup status. Assuming no encrypted partitions.",
+ e.exit_code,
+ )
+ return False
+ with suppress(subp.ProcessExecutionError):
+ subp.subp(["cryptsetup", "isLuks", partition])
+ LOG.debug("Determined that %s is encrypted", blockdev)
+ return True
+ return False
+
+
+def get_underlying_partition(blockdev):
+ command = ["dmsetup", "deps", "--options=devname", blockdev]
+ dep: str = subp.subp(command)[0] # type: ignore
+ # Returned result should look something like:
+ # 1 dependencies : (vdb1)
+ if not dep.startswith("1 depend"):
+ raise RuntimeError(
+ f"Expecting '1 dependencies' from 'dmsetup'. Received: {dep}"
+ )
+ try:
+ return f'/dev/{dep.split(": (")[1].split(")")[0]}'
+ except IndexError as e:
+ raise RuntimeError(
+ f"Ran `{command}`, but received unexpected stdout: `{dep}`"
+ ) from e
+
+
+def resize_encrypted(blockdev, partition) -> Tuple[str, str]:
+ """Use 'cryptsetup resize' to resize LUKS volume.
+
+ The loaded keyfile is json formatted with 'key' and 'slot' keys.
+ key is base64 encoded. Example:
+ {"key":"XFmCwX2FHIQp0LBWaLEMiHIyfxt1SGm16VvUAVledlY=","slot":5}
+ """
+ if not KEYDATA_PATH.exists():
+ return (RESIZE.SKIPPED, "No encryption keyfile found")
+ try:
+ with KEYDATA_PATH.open() as f:
+ keydata = json.load(f)
+ key = keydata["key"]
+ decoded_key = base64.b64decode(key)
+ slot = keydata["slot"]
+ except Exception as e:
+ raise RuntimeError(
+ "Could not load encryption key. This is expected if "
+ "the volume has been previously resized."
+ ) from e
+
+ try:
+ subp.subp(
+ ["cryptsetup", "--key-file", "-", "resize", blockdev],
+ data=decoded_key,
+ )
+ finally:
+ try:
+ subp.subp(
+ [
+ "cryptsetup",
+ "luksKillSlot",
+ "--batch-mode",
+ partition,
+ str(slot),
+ ]
+ )
+ except subp.ProcessExecutionError as e:
+ LOG.warning(
+ "Failed to kill luks slot after resizing encrypted volume: %s",
+ e,
+ )
+ try:
+ KEYDATA_PATH.unlink()
+ except Exception:
+ util.logexc(
+ LOG, "Failed to remove keyfile after resizing encrypted volume"
+ )
+
+ return (
+ RESIZE.CHANGED,
+ f"Successfully resized encrypted volume '{blockdev}'",
+ )
+
+
def resize_devices(resizer, devices):
# returns a tuple of tuples containing (entry-in-devices, action, message)
+ devices = copy.copy(devices)
info = []
- for devent in devices:
+
+ while devices:
+ devent = devices.pop(0)
try:
blockdev = devent2dev(devent)
except ValueError as e:
@@ -329,6 +459,49 @@ def resize_devices(resizer, devices):
)
continue
+ underlying_blockdev = get_mapped_device(blockdev)
+ if underlying_blockdev:
+ try:
+ # We need to resize the underlying partition first
+ partition = get_underlying_partition(blockdev)
+ if is_encrypted(underlying_blockdev, partition):
+ if partition not in [x[0] for x in info]:
+ # We shouldn't attempt to resize this mapped partition
+ # until the underlying partition is resized, so re-add
+ # our device to the beginning of the list we're
+ # iterating over, then add our underlying partition
+ # so it can get processed first
+ devices.insert(0, devent)
+ devices.insert(0, partition)
+ continue
+ status, message = resize_encrypted(blockdev, partition)
+ info.append(
+ (
+ devent,
+ status,
+ message,
+ )
+ )
+ else:
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ f"Resizing mapped device ({blockdev}) skipped "
+ "as it is not encrypted.",
+ )
+ )
+ except Exception as e:
+ info.append(
+ (
+ devent,
+ RESIZE.FAILED,
+ f"Resizing encrypted device ({blockdev}) failed: {e}",
+ )
+ )
+ # At this point, we WON'T resize a non-encrypted mapped device
+ # though we should probably grow the ability to
+ continue
try:
(disk, ptnum) = device_part_info(blockdev)
except (TypeError, ValueError) as e:
@@ -388,6 +561,11 @@ def handle(_name, cfg, _cloud, log, _args):
mode = mycfg.get("mode", "auto")
if util.is_false(mode):
+ if mode != "off":
+ log.warning(
+ f"DEPRECATED: growpart mode '{mode}' is deprecated. "
+ "Use 'off' instead."
+ )
log.debug("growpart disabled: mode=%s" % mode)
return
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index ad7243d9..c23e40f5 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -6,12 +6,17 @@
# Author: Matthew Ruffell <matthew.ruffell@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Grub Dpkg: Configure grub debconf installation device"""
-"""
-Grub Dpkg
----------
-**Summary:** configure grub debconf installation device
+import os
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.subp import ProcessExecutionError
+MODULE_DESCRIPTION = """\
Configure which device is used as the target for grub installation. This module
should work correctly by default without any user configuration. It can be
enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` config
@@ -25,28 +30,28 @@ but we do fallback to the plain disk name if a by-id name is not present.
If this module is executed inside a container, then the debconf database is
seeded with empty values, and install_devices_empty is set to true.
-
-**Internal name:** ``cc_grub_dpkg``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
-
- grub_dpkg:
- enabled: <true/false>
- grub-pc/install_devices: <devices>
- grub-pc/install_devices_empty: <devices>
- grub-dpkg: (alias for grub_dpkg)
"""
-
-import os
-
-from cloudinit import subp, util
-from cloudinit.subp import ProcessExecutionError
-
distros = ["ubuntu", "debian"]
+meta: MetaSchema = {
+ "id": "cc_grub_dpkg",
+ "name": "Grub Dpkg",
+ "title": "Configure grub debconf installation device",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ grub_dpkg:
+ enabled: true
+ grub-pc/install_devices: /dev/sda
+ grub-pc/install_devices_empty: false
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def fetch_idevs(log):
@@ -121,14 +126,20 @@ def handle(name, cfg, _cloud, log, _args):
return
idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
- idevs_empty = util.get_cfg_option_str(
- mycfg, "grub-pc/install_devices_empty", None
- )
-
if idevs is None:
idevs = fetch_idevs(log)
+
+ idevs_empty = mycfg.get("grub-pc/install_devices_empty")
if idevs_empty is None:
- idevs_empty = "false" if idevs else "true"
+ idevs_empty = not idevs
+ elif not isinstance(idevs_empty, bool):
+ log.warning(
+ "DEPRECATED: grub_dpkg: grub-pc/install_devices_empty value of "
+ f"'{idevs_empty}' is not boolean. Use of non-boolean values "
+ "will be removed in a future version of cloud-init."
+ )
+ idevs_empty = util.translate_bool(idevs_empty)
+ idevs_empty = str(idevs_empty).lower()
# now idevs and idevs_empty are set to determined values
# or, those set by user
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
index 34c4557e..a3668232 100644
--- a/cloudinit/config/cc_install_hotplug.py
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -4,22 +4,15 @@ import os
from textwrap import dedent
from cloudinit import stages, subp, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.event import EventScope, EventType
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
-distros = [ALL_DISTROS]
-
meta: MetaSchema = {
"id": "cc_install_hotplug",
"name": "Install Hotplug",
- "title": "Install hotplug if supported and enabled",
+ "title": "Install hotplug udev rules if supported and enabled",
"description": dedent(
"""\
This module will install the udev rules to enable hotplug if
@@ -36,7 +29,8 @@ meta: MetaSchema = {
Currently supported datasources: Openstack, EC2
"""
),
- "distros": distros,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
"examples": [
dedent(
"""\
@@ -55,43 +49,9 @@ meta: MetaSchema = {
"""
),
],
- "frequency": frequency,
-}
-
-schema = {
- "type": "object",
- "properties": {
- "updates": {
- "type": "object",
- "additionalProperties": False,
- "properties": {
- "network": {
- "type": "object",
- "required": ["when"],
- "additionalProperties": False,
- "properties": {
- "when": {
- "type": "array",
- "additionalProperties": False,
- "items": {
- "type": "string",
- "additionalProperties": False,
- "enum": [
- "boot-new-instance",
- "boot-legacy",
- "boot",
- "hotplug",
- ],
- },
- }
- },
- }
- },
- }
- },
}
-__doc__ = get_meta_doc(meta, schema)
+__doc__ = get_meta_doc(meta)
HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
@@ -105,7 +65,6 @@ LABEL="cloudinit_end"
def handle(_name, cfg, cloud, log, _args):
- validate_cloudconfig_schema(cfg, schema)
network_hotplug_enabled = (
"updates" in cfg
and "network" in cfg["updates"]
diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py
index 98ef326a..211cb015 100644
--- a/cloudinit/config/cc_keyboard.py
+++ b/cloudinit/config/cc_keyboard.py
@@ -10,31 +10,21 @@ from textwrap import dedent
from cloudinit import distros
from cloudinit import log as logging
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
-
# FIXME: setting keyboard layout should be supported by all OSes.
# But currently only implemented for Linux distributions that use systemd.
-osfamilies = ["arch", "debian", "redhat", "suse"]
-distros = distros.Distro.expand_osfamily(osfamilies)
DEFAULT_KEYBOARD_MODEL = "pc105"
+distros = distros.Distro.expand_osfamily(["arch", "debian", "redhat", "suse"])
+
meta: MetaSchema = {
"id": "cc_keyboard",
"name": "Keyboard",
"title": "Set keyboard layout",
- "description": dedent(
- """\
- Handle keyboard configuration.
- """
- ),
+ "description": "Handle keyboard configuration.",
"distros": distros,
"examples": [
dedent(
@@ -55,57 +45,11 @@ meta: MetaSchema = {
"""
),
],
- "frequency": frequency,
+ "frequency": PER_INSTANCE,
}
-schema = {
- "type": "object",
- "properties": {
- "keyboard": {
- "type": "object",
- "properties": {
- "layout": {
- "type": "string",
- "description": dedent(
- """\
- Required. Keyboard layout. Corresponds to XKBLAYOUT.
- """
- ),
- },
- "model": {
- "type": "string",
- "default": DEFAULT_KEYBOARD_MODEL,
- "description": dedent(
- """\
- Optional. Keyboard model. Corresponds to XKBMODEL.
- """
- ),
- },
- "variant": {
- "type": "string",
- "description": dedent(
- """\
- Optional. Keyboard variant. Corresponds to XKBVARIANT.
- """
- ),
- },
- "options": {
- "type": "string",
- "description": dedent(
- """\
- Optional. Keyboard options. Corresponds to XKBOPTIONS.
- """
- ),
- },
- },
- "required": ["layout"],
- "additionalProperties": False,
- }
- },
-}
-
-__doc__ = get_meta_doc(meta, schema)
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
@@ -116,7 +60,6 @@ def handle(name, cfg, cloud, log, args):
"Skipping module named %s, no 'keyboard' section found", name
)
return
- validate_cloudconfig_schema(cfg, schema)
kb_cfg = cfg["keyboard"]
layout = kb_cfg["layout"]
model = kb_cfg.get("model", DEFAULT_KEYBOARD_MODEL)
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index ab35e136..dd8b92fe 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -6,46 +6,64 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Keys to Console
----------------
-**Summary:** control which SSH host keys may be written to console
-
-For security reasons it may be desirable not to write SSH host keys and their
-fingerprints to the console. To avoid either being written to the console the
-``emit_keys_to_console`` config key under the main ``ssh`` config key can be
-used. To avoid the fingerprint of types of SSH host keys being written to
-console the ``ssh_fp_console_blacklist`` config key can be used. By default
-all types of keys will have their fingerprints written to console. To avoid
-host keys of a key type being written to console the
-``ssh_key_console_blacklist`` config key can be used. By default ``ssh-dss``
-host keys are not written to console.
-
-**Internal name:** ``cc_keys_to_console``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- ssh:
- emit_keys_to_console: false
-
- ssh_fp_console_blacklist: <list of key types>
- ssh_key_console_blacklist: <list of key types>
-"""
+"""Keys to Console: Control which SSH host keys may be written to console"""
import os
+from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
-
# This is a tool that cloud init provides
HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints"
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_keys_to_console",
+ "name": "Keys to Console",
+ "title": "Control which SSH host keys may be written to console",
+ "description": (
+ "For security reasons it may be desirable not to write SSH host keys"
+ " and their fingerprints to the console. To avoid either being written"
+ " to the console the ``emit_keys_to_console`` config key under the"
+ " main ``ssh`` config key can be used. To avoid the fingerprint of"
+ " types of SSH host keys being written to console the"
+ " ``ssh_fp_console_blacklist`` config key can be used. By default,"
+ " all types of keys will have their fingerprints written to console."
+ " To avoid host keys of a key type being written to console the"
+ "``ssh_key_console_blacklist`` config key can be used. By default,"
+ " ``ssh-dss`` host keys are not written to console."
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Do not print any SSH keys to system console
+ ssh:
+ emit_keys_to_console: false
+ """
+ ),
+ dedent(
+ """\
+ # Do not print certain ssh key types to console
+ ssh_key_console_blacklist: [dsa, ssh-dss]
+ """
+ ),
+ dedent(
+ """\
+ # Do not print specific ssh key fingerprints to console
+ ssh_fp_console_blacklist:
+ - E25451E0221B5773DEBFF178ECDACB160995AA89
+ - FE76292D55E8B28EE6DB2B34B2D8A784F8C0AAB0
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+__doc__ = get_meta_doc(meta)
+
def _get_helper_tool_path(distro):
try:
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 03ebf411..ede09bd9 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -6,17 +6,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Landscape
----------
-**Summary:** install and configure landscape client
+"""install and configure landscape client"""
+
+import os
+from io import BytesIO
+from textwrap import dedent
+
+from configobj import ConfigObj
+
+from cloudinit import subp, type_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
+LS_DEFAULT_FILE = "/etc/default/landscape-client"
+
+# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
+LSC_BUILTIN_CFG = {
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
+ }
+}
+
+MODULE_DESCRIPTION = """\
This module installs and configures ``landscape-client``. The landscape client
will only be installed if the key ``landscape`` is present in config. Landscape
client configuration is given under the ``client`` key under the main
``landscape`` config key. The config parameters are not interpreted by
cloud-init, but rather are converted into a ConfigObj formatted file and
-written out to ``/etc/landscape/client.conf``.
+written out to the `[client]` section in ``/etc/landscape/client.conf``.
The following default client config is provided, but can be overridden::
@@ -33,53 +54,47 @@ The following default client config is provided, but can be overridden::
.. note::
if ``tags`` is defined, its contents should be a string delimited with
``,`` rather than a list
-
-**Internal name:** ``cc_landscape``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- landscape:
- client:
- url: "https://landscape.canonical.com/message-system"
- ping_url: "http://landscape.canonical.com/ping"
- data_path: "/var/lib/landscape/client"
- http_proxy: "http://my.proxy.com/foobar"
- https_proxy: "https://my.proxy.com/foobar"
- tags: "server,cloud"
- computer_title: "footitle"
- registration_key: "fookey"
- account_name: "fooaccount"
"""
-
-import os
-from io import BytesIO
-
-from configobj import ConfigObj
-
-from cloudinit import subp, type_utils, util
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
-LS_DEFAULT_FILE = "/etc/default/landscape-client"
-
distros = ["ubuntu"]
-# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
-LSC_BUILTIN_CFG = {
- "client": {
- "log_level": "info",
- "url": "https://landscape.canonical.com/message-system",
- "ping_url": "http://landscape.canonical.com/ping",
- "data_path": "/var/lib/landscape/client",
- }
+meta: MetaSchema = {
+ "id": "cc_landscape",
+ "name": "Landscape",
+ "title": "Install and configure landscape client",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # To discover additional supported client keys, run
+ # man landscape-config.
+ landscape:
+ client:
+ url: "https://landscape.canonical.com/message-system"
+ ping_url: "http://landscape.canonical.com/ping"
+ data_path: "/var/lib/landscape/client"
+ http_proxy: "http://my.proxy.com/foobar"
+ https_proxy: "https://my.proxy.com/foobar"
+ tags: "server,cloud"
+ computer_title: "footitle"
+ registration_key: "fookey"
+ account_name: "fooaccount"
+ """
+ ),
+ dedent(
+ """\
+ # Any keys below `client` are optional and the default values will
+ # be used.
+ landscape:
+ client: {}
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
}
+__doc__ = get_meta_doc(meta)
+
def handle(_name, cfg, cloud, log, _args):
"""
@@ -102,6 +117,7 @@ def handle(_name, cfg, cloud, log, _args):
cloud.distro.install_packages(("landscape-client",))
+ # Later order config values override earlier values
merge_data = [
LSC_BUILTIN_CFG,
LSC_CLIENT_CFG_FILE,
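
``merge_data`` above is consumed by cc_landscape's ``merge_together`` helper, with later entries overriding earlier ones (the point of the new comment in the hunk). A minimal sketch of that merge, assuming only the third-party ``configobj`` package::

    from configobj import ConfigObj  # pip install configobj

    def merge_together(objs):
        # Later entries win: each obj may be a dict, a ConfigObj, or a
        # path to an existing config file; empty entries are skipped.
        cfg = ConfigObj({})
        for obj in objs:
            if not obj:
                continue
            if isinstance(obj, ConfigObj):
                cfg.merge(obj)
            else:
                cfg.merge(ConfigObj(obj))
        return cfg

    merged = merge_together([{"client": {"log_level": "info"}},
                             {"client": {"log_level": "debug"}}])
    print(merged["client"]["log_level"])  # debug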
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 29f6a9b6..6a31933e 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -11,15 +11,11 @@
from textwrap import dedent
from cloudinit import util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
distros = ["all"]
+
meta: MetaSchema = {
"id": "cc_locale",
"name": "Locale",
@@ -45,29 +41,10 @@ meta: MetaSchema = {
"""
),
],
- "frequency": frequency,
-}
-
-schema = {
- "type": "object",
- "properties": {
- "locale": {
- "type": "string",
- "description": (
- "The locale to set as the system's locale (e.g. ar_PS)"
- ),
- },
- "locale_configfile": {
- "type": "string",
- "description": (
- "The file in which to write the locale configuration (defaults"
- " to the distro's default location)"
- ),
- },
- },
+ "frequency": PER_INSTANCE,
}
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, args):
@@ -82,8 +59,6 @@ def handle(name, cfg, cloud, log, args):
)
return
- validate_cloudconfig_schema(cfg, schema)
-
log.debug("Setting locale to %s", locale)
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
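
The dropped per-module ``schema`` dict was plain JSON Schema; validation now happens once against the consolidated schema instead of inside each handler. For reference, the check the removed ``validate_cloudconfig_schema`` call performed can be approximated with the third-party ``jsonschema`` package (illustrative only, not cloud-init's code)::

    from jsonschema import ValidationError, validate  # pip install jsonschema

    LOCALE_SCHEMA = {
        "type": "object",
        "properties": {
            "locale": {"type": "string"},
            "locale_configfile": {"type": "string"},
        },
    }

    def check_locale_cfg(cfg):
        # True when cfg satisfies the (removed) cc_locale schema
        try:
            validate(instance=cfg, schema=LOCALE_SCHEMA)
            return True
        except ValidationError:
            return False

    print(check_locale_cfg({"locale": "en_US.UTF-8"}))  # True
    print(check_locale_cfg({"locale": 5}))              # False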
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 13ddcbe9..847a7c3c 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -4,59 +4,75 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-LXD
----
-**Summary:** configure lxd with ``lxd init`` and optionally lxd-bridge
+"""LXD: configure lxd with ``lxd init`` and optionally lxd-bridge"""
+
+import os
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+_DEFAULT_NETWORK_NAME = "lxdbr0"
+
+MODULE_DESCRIPTION = """\
This module configures lxd with user specified options using ``lxd init``.
If lxd is not present on the system but lxd configuration is provided, then
lxd will be installed. If the selected storage backend is zfs, then zfs will
be installed if missing. If network bridge configuration is provided, then
lxd-bridge will be configured accordingly.
-
-**Internal name:** ``cc_lxd``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- lxd:
- init:
- network_address: <ip addr>
- network_port: <port>
- storage_backend: <zfs/dir>
- storage_create_device: <dev>
- storage_create_loop: <size>
- storage_pool: <name>
- trust_password: <password>
- bridge:
- mode: <new, existing or none>
- name: <name>
- ipv4_address: <ip addr>
- ipv4_netmask: <cidr>
- ipv4_dhcp_first: <ip addr>
- ipv4_dhcp_last: <ip addr>
- ipv4_dhcp_leases: <size>
- ipv4_nat: <bool>
- ipv6_address: <ip addr>
- ipv6_netmask: <cidr>
- ipv6_nat: <bool>
- domain: <domain>
"""
-import os
-
-from cloudinit import log as logging
-from cloudinit import subp, util
-
distros = ["ubuntu"]
-LOG = logging.getLogger(__name__)
-
-_DEFAULT_NETWORK_NAME = "lxdbr0"
+meta: MetaSchema = {
+ "id": "cc_lxd",
+ "name": "LXD",
+ "title": "Configure LXD with ``lxd init`` and optionally lxd-bridge",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Simplest working directory backed LXD configuration
+ lxd:
+ init:
+ storage_backend: dir
+ """
+ ),
+ dedent(
+ """\
+ lxd:
+ init:
+ network_address: 0.0.0.0
+ network_port: 8443
+ storage_backend: zfs
+ storage_pool: datapool
+ storage_create_loop: 10
+ bridge:
+ mode: new
+ name: lxdbr0
+ ipv4_address: 10.0.8.1
+ ipv4_netmask: 24
+ ipv4_dhcp_first: 10.0.8.2
+ ipv4_dhcp_last: 10.0.8.3
+ ipv4_dhcp_leases: 250
+ ipv4_nat: true
+ ipv6_address: fd98:9e0:3744::1
+ ipv6_netmask: 64
+ ipv6_nat: true
+ domain: lxd
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, args):
@@ -300,8 +316,8 @@ def maybe_cleanup_default(
"""Newer versions of lxc (3.0.1+) create a lxdbr0 network when
'lxd init --auto' is run. Older versions did not.
- By removing ay that lxd-init created, we simply leave the add/attach
- code in-tact.
+ By removing any that lxd-init created, we simply leave the add/attach
+ code intact.
https://github.com/lxc/lxd/issues/4649"""
if net_name != _DEFAULT_NETWORK_NAME or not did_init:
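
For context on the ``init`` examples above: cc_lxd translates that mapping into an ``lxd init --auto`` command line, one dashed flag per key. A rough sketch of the translation, simplified from the module::

    def lxd_init_cmd(init_cfg):
        # Keys such as storage_backend become --storage-backend=...;
        # unset (None) values are skipped.
        cmd = ["lxd", "init", "--auto"]
        for key in sorted(init_cfg):
            if init_cfg[key] is not None:
                cmd.append("--%s=%s" % (key.replace("_", "-"), init_cfg[key]))
        return cmd

    print(lxd_init_cmd({"storage_backend": "dir"}))
    # ['lxd', 'init', '--auto', '--storage-backend=dir']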
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 1b0158ec..33f7556d 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -7,11 +7,28 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Mcollective
------------
-**Summary:** install, configure and start mcollective
+""" Mcollective: Install, configure and start mcollective"""
+
+import errno
+import io
+from textwrap import dedent
+
+# Used since this can maintain comments
+# and doesn't need a top level section
+from configobj import ConfigObj
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
+PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
+SERVER_CFG = "/etc/mcollective/server.cfg"
+
+LOG = logging.getLogger(__name__)
+
+MODULE_DESCRIPTION = """\
This module installs, configures and starts mcollective. If the ``mcollective``
key is present in config, then mcollective will be installed and started.
@@ -26,43 +43,48 @@ private certificates for mcollective. Their values will be written to
.. note::
The ec2 metadata service is readable by non-root users.
If security is a concern, use include-once and ssl urls.
-
-**Internal name:** ``cc_mcollective``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- mcollective:
- conf:
- <key>: <value>
- public-cert: |
- -------BEGIN CERTIFICATE--------
- <cert data>
- -------END CERTIFICATE--------
- private-cert: |
- -------BEGIN CERTIFICATE--------
- <cert data>
- -------END CERTIFICATE--------
"""
-import errno
-import io
-
-# Used since this can maintain comments
-# and doesn't need a top level section
-from configobj import ConfigObj
-
-from cloudinit import log as logging
-from cloudinit import subp, util
-
-PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
-PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
-SERVER_CFG = "/etc/mcollective/server.cfg"
-
-LOG = logging.getLogger(__name__)
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_mcollective",
+ "name": "Mcollective",
+ "title": "Install, configure and start mcollective",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Provide server private and public key and provide the following
+ # config settings in /etc/mcollective/server.cfg:
+ # loglevel: debug
+ # plugin.stomp.host: dbhost
+
+ # WARNING WARNING WARNING
+ # The ec2 metadata service is a network service, and thus is
+ # readable by non-root users on the system
+ # (ie: 'ec2metadata --user-data')
+ # If you want security for this, please use include-once + SSL urls
+ mcollective:
+ conf:
+ loglevel: debug
+ plugin.stomp.host: dbhost
+ public-cert: |
+ -------BEGIN CERTIFICATE--------
+ <cert data>
+ -------END CERTIFICATE--------
+ private-cert: |
+ -------BEGIN CERTIFICATE--------
+ <cert data>
+ -------END CERTIFICATE--------
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+__doc__ = get_meta_doc(meta)
def configure(
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 4fafb4af..6aed54b3 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -4,36 +4,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Migrator
---------
-**Summary:** migrate old versions of cloud-init data to new
+"""Migrator: Migrate old versions of cloud-init data to new"""
+
+import os
+import shutil
+
+from cloudinit import helpers, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
This module handles moving old versions of cloud-init data to newer ones.
Currently, it only handles renaming cloud-init's per-frequency semaphore files
to canonicalized name and renaming legacy semaphore names to newer ones. This
module is enabled by default, but can be disabled by specifying ``migrate:
false`` in config.
-
-**Internal name:** ``cc_migrator``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- migrate: <true/false>
"""
-import os
-import shutil
-
-from cloudinit import helpers, util
-from cloudinit.settings import PER_ALWAYS
-
+distros = ["all"]
frequency = PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_migrator",
+ "name": "Migrator",
+ "title": "Migrate old versions of cloud-init data to new",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": ["# Do not migrate cloud-init semaphores\nmigrate: false\n"],
+ "frequency": frequency,
+}
+
+__doc__ = get_meta_doc(meta)
+
def _migrate_canon_sems(cloud):
paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
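
The renaming described in the new docstring amounts to moving semaphore files whose names use the legacy ``-`` separator. A standalone sketch, assuming ``helpers.canon_sem_name`` simply swaps ``-`` for ``_`` (an assumption for illustration, not quoted from the helper)::

    import os
    import shutil

    def canon_sem_name(name):
        # Assumed canonicalization: module semaphores use "_" internally
        return name.replace("-", "_")

    def migrate_canon_sems(sem_dir):
        # Rename legacy semaphore files in place; return how many moved
        adjusted = 0
        for p in os.listdir(sem_dir):
            name, ext = os.path.splitext(p)
            canon = canon_sem_name(name)
            if canon != name:
                shutil.move(os.path.join(sem_dir, p),
                            os.path.join(sem_dir, canon + ext))
                adjusted += 1
        return adjusted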
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 83eb5b1b..1d05c9b9 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -6,11 +6,19 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Mounts
-------
-**Summary:** configure mount points and swap files
+"""Mounts: Configure mount points and swap files"""
+
+import logging
+import os
+import re
+from string import whitespace
+from textwrap import dedent
+
+from cloudinit import subp, type_utils, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module can add or remove mountpoints from ``/etc/fstab`` as well as
configure swap. The ``mounts`` config key takes a list of fstab entries to add.
Each entry is specified as a list of ``[ fs_spec, fs_file, fs_vfstype,
@@ -19,55 +27,79 @@ consult the manual for ``/etc/fstab``. When specifying the ``fs_spec``, if the
device name starts with one of ``xvd``, ``sd``, ``hd``, or ``vd``, the leading
``/dev`` may be omitted.
-In order to remove a previously listed mount, an entry can be added to the
-mounts list containing ``fs_spec`` for the device to be removed but no
-mountpoint (i.e. ``[ sda1 ]`` or ``[ sda1, null ]``).
+Any mounts that do not appear to refer to an attached block device or network
+resource will be skipped with a log message like "Ignoring nonexistent mount ...".
+
+Cloud-init will attempt to add the following mount directives if available and
+unconfigured in ``/etc/fstab``::
+
+ mounts:
+ - ["ephemeral0", "/mnt", "auto",\
+"defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"]
+ - ["swap", "none", "swap", "sw", "0", "0"]
+
+In order to remove a previously listed mount, an entry can be added to
+the ``mounts`` list containing ``fs_spec`` for the device to be removed but no
+mountpoint (i.e. ``[ swap ]`` or ``[ swap, null ]``).
The ``mount_default_fields`` config key allows default options to be specified
for the values in a ``mounts`` entry that are not specified, aside from the
``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 6
values. It defaults to::
- mount_default_fields: [none, none, "auto", "defaults,nobootwait", "0", "2"]
-
-On a systemd booted system that default is the mostly equivalent::
-
- mount_default_fields: [none, none, "auto",
- "defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"]
+ mount_default_fields: [none, none, "auto",\
+"defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"]
-Note that `nobootwait` is an upstart specific boot option that somewhat
-equates to the more standard `nofail`.
+The default ``mount_default_fields`` will differ on non-systemd init systems.
Swap files can be configured by setting the path to the swap file to create
 with ``filename``, the size of the swap file with ``size``, and the maximum
 size of the swap file if using ``size: auto`` with ``maxsize``. By default no
swap file is created.
+"""
-**Internal name:** ``cc_mounts``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
+example = dedent(
+ """\
+ # Mount ephemeral0 with "noexec" flag, /dev/sdc with mount_default_fields,
+ # and /dev/xvdh with custom fs_passno "0" to avoid fsck on the mount.
+ # Also provide an automatically sized swap with a max size of 10485760
+ # bytes.
mounts:
- [ /dev/ephemeral0, /mnt, auto, "defaults,noexec" ]
- [ sdc, /opt/data ]
- - [ xvdh, /opt/data, "auto", "defaults,nofail", "0", "0" ]
- mount_default_fields: [None, None, "auto", "defaults,nofail", "0", "2"]
+ - [ xvdh, /opt/data, auto, "defaults,nofail", "0", "0" ]
+ mount_default_fields: [None, None, auto, "defaults,nofail", "0", "2"]
swap:
- filename: <file>
- size: <"auto"/size in bytes>
- maxsize: <size in bytes>
-"""
-
-import logging
-import os
-import re
-from string import whitespace
-
-from cloudinit import subp, type_utils, util
+ filename: /my/swapfile
+ size: auto
+ maxsize: 10485760
+ """
+)
+
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_mounts",
+ "name": "Mounts",
+ "title": "Configure mount points and swap files",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ example,
+ dedent(
+ """\
+ # Create a 2 GB swap file at /swapfile using human-readable values
+ swap:
+ filename: /swapfile
+ size: 2G
+ maxsize: 2G
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+__doc__ = get_meta_doc(meta)
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
@@ -178,7 +210,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
if memsize is None:
memsize = util.read_meminfo()["total"]
- GB = 2 ** 30
+ GB = 2**30
sugg_max = 8 * GB
info = {"avail": "na", "max_in": maxsize, "mem": memsize}
@@ -230,7 +262,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
info["size"] = size
- MB = 2 ** 20
+ MB = 2**20
pinfo = {}
for k, v in info.items():
if isinstance(v, int):
@@ -324,7 +356,7 @@ def setup_swapfile(fname, size=None, maxsize=None):
fsys=swap_dir, maxsize=maxsize, memsize=memsize
)
- mibsize = str(int(size / (2 ** 20)))
+ mibsize = str(int(size / (2**20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
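
The ``2**20``/``2**30`` changes above are formatter cosmetics, but they sit in the size math behind the swap examples. A sketch of how a human-readable ``size: 2G`` could map to the MiB count used when creating the swap file (``human2bytes`` here is a hypothetical helper written for this example, not cloud-init's)::

    def human2bytes(size):
        # Accept a bare byte count or a B/K/M/G/T suffixed string
        suffixes = {"B": 1, "K": 2**10, "M": 2**20, "G": 2**30, "T": 2**40}
        size = str(size).strip()
        if size[-1].isdigit():
            return int(size)
        return int(float(size[:-1]) * suffixes[size[-1].upper()])

    print(human2bytes("2G") // 2**20)  # 2048 MiB for a 2 GB swap file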
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 25bba764..3bc1d303 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -12,11 +12,7 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, temp_utils, templater, type_utils, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -210,137 +206,14 @@ meta: MetaSchema = {
],
"frequency": PER_INSTANCE,
}
+__doc__ = get_meta_doc(meta)
+
-schema = {
- "type": "object",
- "properties": {
- "ntp": {
- "type": ["object", "null"],
- "properties": {
- "pools": {
- "type": "array",
- "items": {"type": "string", "format": "hostname"},
- "uniqueItems": True,
- "description": dedent(
- """\
- List of ntp pools. If both pools and servers are
- empty, 4 default pool servers will be provided of
- the format ``{0-3}.{distro}.pool.ntp.org``. NOTE:
- for Alpine Linux when using the Busybox NTP client
- this setting will be ignored due to the limited
- functionality of Busybox's ntpd."""
- ),
- },
- "servers": {
- "type": "array",
- "items": {"type": "string", "format": "hostname"},
- "uniqueItems": True,
- "description": dedent(
- """\
- List of ntp servers. If both pools and servers are
- empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``."""
- ),
- },
- "ntp_client": {
- "type": "string",
- "default": "auto",
- "description": dedent(
- """\
- Name of an NTP client to use to configure system NTP.
- When unprovided or 'auto' the default client preferred
- by the distribution will be used. The following
- built-in client names can be used to override existing
- configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""
- ),
- },
- "enabled": {
- "type": "boolean",
- "default": True,
- "description": dedent(
- """\
- Attempt to enable ntp clients if set to True. If set
- to False, ntp client will not be configured or
- installed"""
- ),
- },
- "config": {
- "description": dedent(
- """\
- Configuration settings or overrides for the
- ``ntp_client`` specified."""
- ),
- "type": ["object"],
- "properties": {
- "confpath": {
- "type": "string",
- "description": dedent(
- """\
- The path to where the ``ntp_client``
- configuration is written."""
- ),
- },
- "check_exe": {
- "type": "string",
- "description": dedent(
- """\
- The executable name for the ``ntp_client``.
- For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""
- ),
- },
- "packages": {
- "type": "array",
- "items": {
- "type": "string",
- },
- "uniqueItems": True,
- "description": dedent(
- """\
- List of packages needed to be installed for the
- selected ``ntp_client``."""
- ),
- },
- "service_name": {
- "type": "string",
- "description": dedent(
- """\
- The systemd or sysvinit service name used to
- start and stop the ``ntp_client``
- service."""
- ),
- },
- "template": {
- "type": "string",
- "description": dedent(
- """\
- Inline template allowing users to define their
- own ``ntp_client`` configuration template.
- The value must start with '## template:jinja'
- to enable use of templating support.
- """
- ),
- },
- },
- # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
- # of builtin client values.
- "minProperties": 1, # If we have config, define something
- "additionalProperties": False,
- },
- },
- "additionalProperties": False,
- }
- },
-}
REQUIRED_NTP_CONFIG_KEYS = frozenset(
["check_exe", "confpath", "packages", "service_name"]
)
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
-
-
def distro_ntp_client_configs(distro):
"""Construct a distro-specific ntp client config dictionary by merging
distro specific changes into base config.
@@ -604,8 +477,6 @@ def handle(name, cfg, cloud, log, _args):
" is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))
)
- validate_cloudconfig_schema(cfg, schema)
-
# Allow users to explicitly enable/disable
enabled = ntp_cfg.get("enabled", True)
if util.is_false(enabled):
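
As the removed schema text described, when both ``pools`` and ``servers`` are empty the module falls back to four distro-specific pool names. A sketch of that fallback::

    def generate_server_names(distro, count=4):
        # e.g. 0.ubuntu.pool.ntp.org .. 3.ubuntu.pool.ntp.org
        return ["%d.%s.pool.ntp.org" % (i, distro) for i in range(count)]

    print(generate_server_names("ubuntu"))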
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 14cdfab8..5198305e 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -4,50 +4,53 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Package Update Upgrade Install
-------------------------------
-**Summary:** update, upgrade, and install packages
-
-This module allows packages to be updated, upgraded or installed during boot.
-If any packages are to be installed or an upgrade is to be performed then the
-package cache will be updated first. If a package installation or upgrade
-requires a reboot, then a reboot can be performed if
-``package_reboot_if_required`` is specified. A list of packages to install can
-be provided. Each entry in the list can be either a package name or a list with
-two entries, the first being the package name and the second being the specific
-package version to install.
-
-**Internal name:** ``cc_package_update_upgrade_install``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- packages:
- - pwgen
- - pastebinit
- - [libpython2.7, 2.7.3-0ubuntu3.1]
- package_update: <true/false>
- package_upgrade: <true/false>
- package_reboot_if_required: <true/false>
-
- apt_update: (alias for package_update)
- apt_upgrade: (alias for package_upgrade)
- apt_reboot_if_required: (alias for package_reboot_if_required)
-"""
+"""Package Update Upgrade Install: update, upgrade, and install packages"""
import os
import time
+from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
REBOOT_FILE = "/var/run/reboot-required"
REBOOT_CMD = ["/sbin/reboot"]
+MODULE_DESCRIPTION = """\
+This module allows packages to be updated, upgraded or installed during boot.
+If any packages are to be installed or an upgrade is to be performed then the
+package cache will be updated first. If a package installation or upgrade
+requires a reboot, then a reboot can be performed if
+``package_reboot_if_required`` is specified.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_package_update_upgrade_install",
+ "name": "Package Update Upgrade Install",
+ "title": "Update, upgrade, and install packages",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ packages:
+ - pwgen
+ - pastebinit
+ - [libpython3.8, 3.8.10-0ubuntu1~20.04.2]
+ package_update: true
+ package_upgrade: true
+ package_reboot_if_required: true
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
def _multi_cfg_bool_get(cfg, *keys):
for k in keys:
@@ -60,7 +63,7 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
subp.subp(REBOOT_CMD)
start = time.time()
wait_time = initial_sleep
- for _i in range(0, wait_attempts):
+ for _i in range(wait_attempts):
time.sleep(wait_time)
wait_time *= backoff
elapsed = time.time() - start
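
The ``range(wait_attempts)`` cleanup above is in ``_fire_reboot``, whose wait loop backs off exponentially before giving up. A sketch of the same loop in isolation, raising a plain exception rather than going through cloud-init's helpers::

    import time

    def wait_for_reboot(wait_attempts=6, initial_sleep=1, backoff=2):
        # Sleep 1, 2, 4, ... seconds between checks; if control ever
        # reaches the end, the requested reboot never happened.
        wait_time = initial_sleep
        total = 0
        for _ in range(wait_attempts):
            time.sleep(wait_time)
            total += wait_time
            wait_time *= backoff
        raise RuntimeError("reboot did not happen after %s seconds" % total)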
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index a0e1da78..681c3729 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -6,11 +6,28 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Phone Home
-----------
-**Summary:** post data to url
+"""Phone Home: Post data to url"""
+
+from textwrap import dedent
+
+from cloudinit import templater, url_helper, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+frequency = PER_INSTANCE
+
+POST_LIST_ALL = [
+ "pub_key_dsa",
+ "pub_key_rsa",
+ "pub_key_ecdsa",
+ "pub_key_ed25519",
+ "instance_id",
+ "hostname",
+ "fqdn",
+]
+
+MODULE_DESCRIPTION = """\
This module can be used to post data to a remote host after boot is complete.
If the post url contains the string ``$INSTANCE_ID`` it will be replaced with
the id of the current instance. Either all data can be posted or a list of
@@ -26,7 +43,9 @@ keys to post. Available keys are:
Data is sent as ``x-www-form-urlencoded`` arguments.
-**Example HTTP POST**::
+**Example HTTP POST**:
+
+.. code-block:: http
POST / HTTP/1.1
Content-Length: 1337
@@ -36,39 +55,42 @@ Data is sent as ``x-www-form-urlencoded`` arguments.
Content-Type: application/x-www-form-urlencoded
pub_key_dsa=dsa_contents&pub_key_rsa=rsa_contents&pub_key_ecdsa=ecdsa_contents&pub_key_ed25519=ed25519_contents&instance_id=i-87018aed&hostname=myhost&fqdn=myhost.internal
-
-**Internal name:** ``cc_phone_home``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- phone_home:
- url: http://example.com/$INSTANCE_ID/
- post:
- - pub_key_dsa
- - instance_id
- - fqdn
- tries: 10
"""
-from cloudinit import templater, url_helper, util
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-POST_LIST_ALL = [
- "pub_key_dsa",
- "pub_key_rsa",
- "pub_key_ecdsa",
- "pub_key_ed25519",
- "instance_id",
- "hostname",
- "fqdn",
-]
-
+meta: MetaSchema = {
+ "id": "cc_phone_home",
+ "name": "Phone Home",
+ "title": "Post data to url",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ phone_home:
+ url: http://example.com/$INSTANCE_ID/
+ post: all
+ """
+ ),
+ dedent(
+ """\
+ phone_home:
+ url: http://example.com/$INSTANCE_ID/
+ post:
+ - pub_key_dsa
+ - pub_key_rsa
+ - pub_key_ecdsa
+ - pub_key_ed25519
+ - instance_id
+ - hostname
+ - fqdn
+ tries: 5
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
# phone_home:
# url: http://my.foo.bar/$INSTANCE/
@@ -80,6 +102,8 @@ POST_LIST_ALL = [
# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id, hostname,
# fqdn ]
#
+
+
def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
@@ -105,8 +129,8 @@ def handle(name, cfg, cloud, log, args):
post_list = ph_cfg.get("post", "all")
tries = ph_cfg.get("tries")
try:
- tries = int(tries)
- except Exception:
+ tries = int(tries) # type: ignore
+    except (ValueError, TypeError):
tries = 10
util.logexc(
log,
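
A sketch of the documented behavior only: the collected keys are POSTed as ``x-www-form-urlencoded`` form data with a bounded retry count. The real module goes through ``url_helper`` and templates ``$INSTANCE_ID``; this stdlib version is for illustration::

    from urllib.parse import urlencode
    from urllib.request import Request, urlopen

    def phone_home(url, data, tries=10):
        # POST the key/value pairs as form data, retrying on failure
        body = urlencode(data).encode()
        for attempt in range(1, tries + 1):
            try:
                return urlopen(Request(url, data=body))
            except OSError:
                if attempt == tries:
                    raise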
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index d4eb68c0..7fc4e5ca 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -4,67 +4,78 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Power State Change
-------------------
-**Summary:** change power state
-
-This module handles shutdown/reboot after all config modules have been run. By
-default it will take no action, and the system will keep running unless a
-package installation/upgrade requires a system reboot (e.g. installing a new
-kernel) and ``package_reboot_if_required`` is true. The ``power_state`` config
-key accepts a dict of options. If ``mode`` is any value other than
-``poweroff``, ``halt``, or ``reboot``, then no action will be taken.
-
-The system
-can be shutdown before cloud-init has finished using the ``timeout`` option.
-The ``delay`` key specifies a duration to be added onto any shutdown command
-used. Therefore, if a 5 minute delay and a 120 second shutdown are specified,
-the maximum amount of time between cloud-init starting and the system shutting
-down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay``
-key must have an argument in either the form ``'+5'`` for 5 minutes or ``now``
-for immediate shutdown.
-
-Optionally, a command can be run to determine whether or not
-the system should shut down. The command to be run should be specified in the
-``condition`` key. For command formatting, see the documentation for
-``cc_runcmd``. The specified shutdown behavior will only take place if the
-``condition`` key is omitted or the command specified by the ``condition``
-key returns 0.
-
-.. note::
- With Alpine Linux any message value specified is ignored as Alpine's halt,
- poweroff, and reboot commands do not support broadcasting a message.
-
-**Internal name:** ``cc_power_state_change``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- power_state:
- delay: <now/'+minutes'>
- mode: <poweroff/halt/reboot>
- message: <shutdown message>
- timeout: <seconds>
- condition: <true/false/command>
-"""
+"""Power State Change: Change power state"""
import errno
import os
import re
import subprocess
import time
+from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
EXIT_FAIL = 254
+MODULE_DESCRIPTION = """\
+This module handles shutdown/reboot after all config modules have been run. By
+default it will take no action, and the system will keep running unless a
+package installation/upgrade requires a system reboot (e.g. installing a new
+kernel) and ``package_reboot_if_required`` is true.
+
+Using this module ensures that all other cloud-init config modules have
+finished running before the power state change is executed.
+
+An example to distinguish delay from timeout:
+
+If you set a ``delay`` of 5 (5 minutes) and a ``timeout`` of
+120 (2 minutes), then the max time until shutdown will be 7 minutes, though
+it could be as soon as 5 minutes. Cloud-init will invoke 'shutdown +5' after
+the process finishes, or when 'timeout' seconds have elapsed.
+
+.. note::
+ With Alpine Linux any message value specified is ignored as Alpine's halt,
+ poweroff, and reboot commands do not support broadcasting a message.
+
+"""
+
+meta: MetaSchema = {
+ "id": "cc_power_state_change",
+ "name": "Power State Change",
+ "title": "Change power state",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ power_state:
+ delay: now
+ mode: poweroff
+ message: Powering off
+ timeout: 2
+ condition: true
+ """
+ ),
+ dedent(
+ """\
+ power_state:
+ delay: 30
+ mode: reboot
+ message: Rebooting machine
+ condition: test -f /var/tmp/reboot_me
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
def givecmdline(pid):
# Returns the cmdline for the given process id. In Linux we can use procfs
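
How the example's ``mode``/``delay``/``message`` might map onto a ``shutdown(8)`` command line, as the description suggests with 'shutdown +5'. A sketch under that assumption; the module's actual command construction differs in detail::

    def shutdown_args(mode, delay, message=None):
        # mode picks the shutdown(8) flag; delay is "now" or minutes
        flags = {"poweroff": "-P", "halt": "-H", "reboot": "-r"}
        args = ["shutdown", flags[mode],
                "now" if str(delay) == "now" else "+%d" % int(delay)]
        if message:
            args.append(message)
        return args

    print(shutdown_args("reboot", 30, "Rebooting machine"))
    # ['shutdown', '-r', '+30', 'Rebooting machine']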
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index f51f49bc..c0b073b5 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -6,20 +6,30 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Puppet
-------
-**Summary:** install, configure and start puppet
+"""Puppet: Install, configure and start puppet"""
+
+import os
+import socket
+from io import StringIO
+from textwrap import dedent
+
+import yaml
+from cloudinit import helpers, subp, temp_utils, url_helper, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
+PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
+
+MODULE_DESCRIPTION = """\
This module handles puppet installation and configuration. If the ``puppet``
key does not exist in global configuration, no action will be taken. If a
config entry for ``puppet`` is present, then by default the latest version of
-puppet will be installed. If ``install`` is set to ``false``, puppet will not
-be installed. However, this will result in an error if puppet is not already
-present on the system. The version of puppet to be installed can be specified
-under ``version``, and defaults to ``none``, which selects the latest version
-in the repos. If the ``puppet`` config key exists in the config archive, this
-module will attempt to start puppet even if no installation was performed.
+puppet will be installed. If the ``puppet`` config key exists in the config
+archive, this module will attempt to start puppet even if no installation was
+performed.
The module also provides keys for configuring the new puppet 4 paths and
installing the puppet package from the puppetlabs repositories:
@@ -28,94 +38,69 @@ The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
``csr_attributes_path``. If unset, their values will default to
ones that work with puppet 3.x and with distributions that ship modified
puppet 4.x that uses the old paths.
-
-Agent packages from the puppetlabs repositories can be installed by setting
-``install_type`` to ``aio``. Based on this setting, the default config/SSL/CSR
-paths will be adjusted accordingly. To maintain backwards compatibility this
-setting defaults to ``packages`` which will install puppet from the distro
-packages.
-
-If installing ``aio`` packages, ``collection`` can also be set to one of
-``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly
-counterparts) in order to install specific release streams. By default, the
-puppetlabs repository will be purged after installation finishes; set
-``cleanup`` to ``false`` to prevent this. AIO packages are installed through a
-shell script which is downloaded on the machine and then executed; the path to
-this script can be overridden using the ``aio_install_url`` key.
-
-Puppet configuration can be specified under the ``conf`` key. The
-configuration is specified as a dictionary containing high-level ``<section>``
-keys and lists of ``<key>=<value>`` pairs within each section. Each section
-name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
-such, section names should be one of: ``main``, ``server``, ``agent`` or
-``user`` and keys should be valid puppet configuration options. The
-``certname`` key supports string substitutions for ``%i`` and ``%f``,
-corresponding to the instance id and fqdn of the machine respectively.
-If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
-instead will be used as the puppetserver certificate. It should be specified
-in pem format as a multi-line string (using the ``|`` yaml notation).
-
-Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR
-attributes and certificate extension requests.
-See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
-
-By default, the puppet service will be automatically enabled after installation
-and set to automatically start on boot. To override this in favor of manual
-puppet execution set ``start_service`` to ``false``.
-
-A single manual run can be triggered by setting ``exec`` to ``true``, and
-additional arguments can be passed to ``puppet agent`` via the ``exec_args``
-key (by default the agent will execute with the ``--test`` flag).
-
-**Internal name:** ``cc_puppet``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- puppet:
- install: <true/false>
- version: <version>
- collection: <aio collection>
- install_type: <packages/aio>
- aio_install_url: 'https://git.io/JBhoQ'
- cleanup: <true/false>
- conf_file: '/etc/puppet/puppet.conf'
- ssl_dir: '/var/lib/puppet/ssl'
- csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
- package_name: 'puppet'
- exec: <true/false>
- exec_args: ['--test']
- start_service: <true/false>
- conf:
- agent:
- server: "puppetserver.example.org"
- certname: "%i.%f"
- ca_cert: |
- -------BEGIN CERTIFICATE-------
- <cert data>
- -------END CERTIFICATE-------
- csr_attributes:
- custom_attributes:
- 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
- extension_requests:
- pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
- pp_image_name: my_ami_image
- pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
"""
-import os
-import socket
-from io import StringIO
-
-import yaml
-
-from cloudinit import helpers, subp, temp_utils, url_helper, util
-
-AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
-PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
+meta: MetaSchema = {
+ "id": "cc_puppet",
+ "name": "Puppet",
+ "title": "Install, configure and start puppet",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ puppet:
+ install: true
+ version: "7.7.0"
+ install_type: "aio"
+ collection: "puppet7"
+ aio_install_url: 'https://git.io/JBhoQ'
+ cleanup: true
+ conf_file: "/etc/puppet/puppet.conf"
+ ssl_dir: "/var/lib/puppet/ssl"
+ csr_attributes_path: "/etc/puppet/csr_attributes.yaml"
+ exec: true
+ exec_args: ['--test']
+ conf:
+ agent:
+ server: "puppetserver.example.org"
+ certname: "%i.%f"
+ ca_cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
+ csr_attributes:
+ custom_attributes:
+ 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
+ extension_requests:
+ pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
+ pp_image_name: my_ami_image
+ pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
+ """ # noqa: E501
+ ),
+ dedent(
+ """\
+ puppet:
+ install_type: "packages"
+ package_name: "puppet"
+ exec: false
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
class PuppetConstants(object):
@@ -142,10 +127,8 @@ def _autostart_puppet(log):
],
capture=False,
)
- elif os.path.exists("/bin/systemctl"):
- subp.subp(
- ["/bin/systemctl", "enable", "puppet.service"], capture=False
- )
+ elif subp.which("systemctl"):
+ subp.subp(["systemctl", "enable", "puppet.service"], capture=False)
elif os.path.exists("/sbin/chkconfig"):
subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False)
else:
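
The ``_autostart_puppet`` hunk swaps a hard-coded ``/bin/systemctl`` test for a PATH lookup: merged-``/usr`` distros ship systemctl in ``/usr/bin``, so the fixed path misses it. The stdlib equivalent of ``subp.which`` makes the reasoning visible::

    import shutil

    # A PATH lookup finds systemctl wherever the distro installs it,
    # where os.path.exists("/bin/systemctl") would not.
    if shutil.which("systemctl"):
        print("would run: systemctl enable puppet.service")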
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 87be5348..3ed5612b 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -4,11 +4,18 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Refresh IPv6 interface and RMC
-------------------------------
-**Summary:** Ensure Network Manager is not managing IPv6 interface
+"""Refresh IPv6 interface and RMC:
+Ensure Network Manager is not managing IPv6 interface"""
+
+import errno
+from cloudinit import log as logging
+from cloudinit import netinfo, subp, util
+from cloudinit.config.schema import MetaSchema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+
+MODULE_DESCRIPTION = """\
This module is IBM PowerVM Hypervisor specific
Reliable Scalable Cluster Technology (RSCT) is a set of software components
@@ -25,22 +32,20 @@ This module handles
- Refreshing RMC
- Disabling NetworkManager from handling IPv6 interface, as IPv6 interface
is used for communication between RMC daemon and PowerVM hypervisor.
-
-**Internal name:** ``cc_refresh_rmc_and_interface``
-
-**Module frequency:** always
-
-**Supported distros:** RHEL
-
"""
-import errno
-
-from cloudinit import log as logging
-from cloudinit import netinfo, subp, util
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_refresh_rmc_and_interface",
+ "name": "Refresh IPv6 Interface and RMC",
+ "title": "Ensure Network Manager is not managing IPv6 interface",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_ALWAYS,
+ "examples": [],
+}
+
+# This module is undocumented in our schema docs
+__doc__ = ""
LOG = logging.getLogger(__name__)
# Ensure that /opt/rsct/bin has been added to standard PATH of the
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index 3b929903..57f024ef 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -3,13 +3,18 @@
# Author: Aman Kumar Sinha <amansi26@in.ibm.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Reset RMC: Reset rsct node id"""
-"""
-Reset RMC
-------------
-**Summary:** reset rsct node id
+import os
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
Reset RMC module is IBM PowerVM Hypervisor specific
Reliable Scalable Cluster Technology (RSCT) is a set of software components,
@@ -28,21 +33,20 @@ This module handles
In order to do so, it restarts RSCT service.
Prerequisite of using this module is to install RSCT packages.
-
-**Internal name:** ``cc_reset_rmc``
-
-**Module frequency:** per instance
-
-**Supported distros:** rhel, sles and ubuntu
-
"""
-import os
-
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
+meta: MetaSchema = {
+ "id": "cc_reset_rmc",
+ "name": "Reset RMC",
+    "title": "Reset rsct node id",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
+
+# This module is undocumented in our schema docs
+__doc__ = ""
# RMCCTRL is expected to be in system PATH (/opt/rsct/bin)
# The symlink for RMCCTRL and RECFGCT are
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 19b923a8..39da1b5a 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -14,18 +14,12 @@ import stat
from textwrap import dedent
from cloudinit import subp, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
NOBLOCK = "noblock"
-frequency = PER_ALWAYS
-distros = ["all"]
-
meta: MetaSchema = {
"id": "cc_resizefs",
"name": "Resizefs",
@@ -39,30 +33,18 @@ meta: MetaSchema = {
partition and will block the boot process while the resize command is
running. Optionally, the resize operation can be performed in the
background while cloud-init continues running modules. This can be
- enabled by setting ``resize_rootfs`` to ``true``. This module can be
+ enabled by setting ``resize_rootfs`` to ``noblock``. This module can be
disabled altogether by setting ``resize_rootfs`` to ``false``."""
),
- "distros": distros,
+ "distros": [ALL_DISTROS],
"examples": [
- "resize_rootfs: false # disable root filesystem resize operation"
+ "resize_rootfs: false # disable root filesystem resize operation",
+ "resize_rootfs: noblock # runs resize operation in the background",
],
"frequency": PER_ALWAYS,
}
-schema = {
- "type": "object",
- "properties": {
- "resize_rootfs": {
- "enum": [True, False, NOBLOCK],
- "description": dedent(
- """\
- Whether to resize the root partition. Default: 'true'"""
- ),
- }
- },
-}
-
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def _resize_btrfs(mount_point, devpth):
@@ -229,7 +211,6 @@ def handle(name, cfg, _cloud, log, args):
resize_root = args[0]
else:
resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
- validate_cloudconfig_schema(cfg, schema)
if not util.translate_bool(resize_root, addons=[NOBLOCK]):
log.debug("Skipping module named %s, resizing disabled", name)
return
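
The surviving ``util.translate_bool(resize_root, addons=[NOBLOCK])`` call is what lets ``noblock`` behave as "enabled, but run in the background". A sketch of the assumed contract (not ``util``'s code)::

    def translate_bool(val, addons=()):
        # The usual false-y values disable the resize, while any value
        # in ``addons`` (here "noblock") still counts as enabled.
        if val in addons:
            return True
        return str(val).lower() not in ("false", "0", "no", "off")

    print(translate_bool("noblock", addons=["noblock"]))  # True
    print(translate_bool(False))                          # False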
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index b2970d51..bbf68079 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -6,18 +6,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Resolv Conf
------------
-**Summary:** configure resolv.conf
+"""Resolv Conf: configure resolv.conf"""
+
+from textwrap import dedent
+from cloudinit import log as logging
+from cloudinit import templater, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+RESOLVE_CONFIG_TEMPLATE_MAP = {
+ "/etc/resolv.conf": "resolv.conf",
+ "/etc/systemd/resolved.conf": "systemd.resolved.conf",
+}
+
+MODULE_DESCRIPTION = """\
This module is intended to manage resolv.conf in environments where early
configuration of resolv.conf is necessary for further bootstrapping and/or
-where configuration management such as puppet or chef own dns configuration.
+where configuration management such as puppet or chef own DNS configuration.
As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat
will use sysconfig, this module is likely to be of little use unless those
are configured correctly.
+When using a :ref:`datasource_config_drive` and a RHEL-like system,
+resolv.conf will also be managed automatically due to the available
+information provided for DNS servers in the :ref:`network_config_v2` format.
+For those who wish to have different settings, use this module.
+
+In order for the ``resolv_conf`` section to be applied, ``manage_resolv_conf``
+must be set to ``true``.
+
.. note::
For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP
enabled NICs.
@@ -25,42 +45,40 @@ are configured correctly.
.. note::
And, in Ubuntu/Debian it is recommended that DNS be configured via the
standard /etc/network/interfaces configuration file.
-
-**Internal name:** ``cc_resolv_conf``
-
-**Module frequency:** per instance
-
-**Supported distros:** alpine, fedora, photon, rhel, sles
-
-**Config keys**::
-
- manage_resolv_conf: <true/false>
- resolv_conf:
- nameservers: ['8.8.4.4', '8.8.8.8']
- searchdomains:
- - foo.example.com
- - bar.example.com
- domain: example.com
- options:
- rotate: <true/false>
- timeout: 1
"""
-from cloudinit import log as logging
-from cloudinit import templater, util
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-distros = ["alpine", "fedora", "opensuse", "photon", "rhel", "sles"]
-
-RESOLVE_CONFIG_TEMPLATE_MAP = {
- "/etc/resolv.conf": "resolv.conf",
- "/etc/systemd/resolved.conf": "systemd.resolved.conf",
+meta: MetaSchema = {
+ "id": "cc_resolv_conf",
+ "name": "Resolv Conf",
+ "title": "Configure resolv.conf",
+ "description": MODULE_DESCRIPTION,
+ "distros": ["alpine", "fedora", "opensuse", "photon", "rhel", "sles"],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ manage_resolv_conf: true
+ resolv_conf:
+ nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+ searchdomains:
+ - foo.example.com
+ - bar.example.com
+ domain: example.com
+ sortlist:
+ - 10.0.0.1/255
+ - 10.0.0.2
+ options:
+ rotate: true
+ timeout: 1
+ """
+ )
+ ],
}
+__doc__ = get_meta_doc(meta)
+
def generate_resolv_conf(template_fn, params, target_fname):
flags = []
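
A rough illustration of what the resolv.conf template emits for the cloud-config example above (not cloud-init's actual template rendering)::

    def render_resolv_conf(params):
        # Map the resolv_conf cloud-config keys onto resolv.conf lines
        lines = ["nameserver %s" % ns for ns in params.get("nameservers", [])]
        if params.get("searchdomains"):
            lines.append("search %s" % " ".join(params["searchdomains"]))
        if params.get("domain"):
            lines.append("domain %s" % params["domain"])
        for opt, val in params.get("options", {}).items():
            lines.append("options %s" % (opt if val is True
                                         else "%s:%s" % (opt, val)))
        return "\n".join(lines) + "\n"

    print(render_resolv_conf({"nameservers": ["8.8.8.8"],
                              "domain": "example.com",
                              "options": {"rotate": True, "timeout": 1}}))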
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index b81a7a9b..b742cb95 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -3,47 +3,77 @@
# Author: Brent Baude <bbaude@redhat.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Red Hat Subscription: Register Red Hat Enterprise Linux based system"""
-"""
-Red Hat Subscription
---------------------
-**Summary:** register red hat enterprise linux based system
-
-Register a Red Hat system either by username and password *or* activation and
-org. Following a sucessful registration, you can auto-attach subscriptions, set
-the service level, add subscriptions based on pool id, enable/disable yum
-repositories based on repo id, and alter the rhsm_baseurl and server-hostname
-in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register Red Hat
-Subscription`` example config.
-
-**Internal name:** ``cc_rh_subscription``
-
-**Module frequency:** per instance
-
-**Supported distros:** rhel, fedora
-
-**Config keys**::
-
- rh_subscription:
- username: <username>
- password: <password>
- activation-key: <activation key>
- org: <org number>
- auto-attach: <true/false>
- service-level: <service level>
- add-pool: <list of pool ids>
- enable-repo: <list of yum repo ids>
- disable-repo: <list of yum repo ids>
- rhsm-baseurl: <url>
- server-hostname: <hostname>
-"""
+from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-distros = ["fedora", "rhel"]
+MODULE_DESCRIPTION = """\
+Register a Red Hat system either by username and password *or* activation and
+org. Following a successful registration, you can:
+
+ - auto-attach subscriptions
+ - set the service level
+ - add subscriptions based on pool id
+ - enable/disable yum repositories based on repo id
+ - alter the rhsm_baseurl and server-hostname in ``/etc/rhsm/rhs.conf``.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_rh_subscription",
+ "name": "Red Hat Subscription",
+ "title": "Register Red Hat Enterprise Linux based system",
+ "description": MODULE_DESCRIPTION,
+ "distros": ["fedora", "rhel"],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ rh_subscription:
+ username: joe@foo.bar
+ ## Quote your password if it has symbols to be safe
+ password: '1234abcd'
+ """
+ ),
+ dedent(
+ """\
+ rh_subscription:
+ activation-key: foobar
+ org: 12345
+ """
+ ),
+ dedent(
+ """\
+ rh_subscription:
+ activation-key: foobar
+ org: 12345
+ auto-attach: true
+ service-level: self-support
+ add-pool:
+ - 1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a
+ - 2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b
+ enable-repo:
+ - repo-id-to-enable
+ - other-repo-id-to-enable
+ disable-repo:
+ - repo-id-to-disable
+ - other-repo-id-to-disable
+ # Alter the baseurl in /etc/rhsm/rhsm.conf
+ rhsm-baseurl: http://url
+ # Alter the server hostname in /etc/rhsm/rhsm.conf
+ server-hostname: foo.bar.com
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, _cloud, log, _args):
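
The two credential styles in the examples are mutually exclusive: username/password or activation-key/org. A sketch of how they might select ``subscription-manager register`` flags (the flag names are subscription-manager's own; the dispatch logic is an illustration, not the module's)::

    def register_cmd(cfg):
        # Prefer activation-key/org when both credential styles appear
        if "activation-key" in cfg and "org" in cfg:
            return ["subscription-manager", "register",
                    "--activationkey=%s" % cfg["activation-key"],
                    "--org=%s" % cfg["org"]]
        if "username" in cfg and "password" in cfg:
            return ["subscription-manager", "register",
                    "--username=%s" % cfg["username"],
                    "--password=%s" % cfg["password"]]
        raise ValueError("need username/password or activation-key/org")

    print(register_cmd({"activation-key": "foobar", "org": 12345}))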
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 36a009a2..c1b0f8bd 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -6,13 +6,23 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Rightscale Userdata
--------------------
-**Summary:** support rightscale configuration hooks
+"""Rightscale Userdata: Support rightscale configuration hooks"""
+
+import os
+from urllib.parse import parse_qs
+
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+MY_NAME = "cc_rightscale_userdata"
+MY_HOOKNAME = "CLOUD_INIT_REMOTE_HOOK"
+
+MODULE_DESCRIPTION = """\
This module adds support for RightScale configuration hooks to cloud-init.
-RightScale adds a entry in the format ``CLOUD_INIT_REMOTE_HOOK=http://...`` to
+RightScale adds an entry in the format ``CLOUD_INIT_REMOTE_HOOK=http://...`` to
ec2 user-data. This module checks for this line in the raw userdata and
retrieves any scripts linked by the RightScale user data and places them in the
user scripts configuration directory, to be run later by ``cc_scripts_user``.
@@ -21,17 +31,23 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
the ``CLOUD_INIT_REMOTE_HOOK`` config variable is present in the raw ec2
user data only, not in any cloud-config parts
-**Internal name:** ``cc_rightscale_userdata``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
+**Raw user data schema**::
CLOUD_INIT_REMOTE_HOOK=<url>
"""
+meta: MetaSchema = {
+ "id": "cc_rightscale_userdata",
+ "name": "RightScale Userdata",
+ "title": "Support rightscale configuration hooks",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
+
+__doc__ = get_meta_doc(meta)
+
#
# The purpose of this script is to allow cloud-init to consume
# rightscale style userdata. rightscale user data is key-value pairs
@@ -49,18 +65,6 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
#
#
-import os
-from urllib.parse import parse_qs
-
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-MY_NAME = "cc_rightscale_userdata"
-MY_HOOKNAME = "CLOUD_INIT_REMOTE_HOOK"
-
def handle(name, _cfg, cloud, log, _args):
try:
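
The hook lookup this module performs reduces to a query-string parse of the raw ec2 user data, as the ``parse_qs`` import suggests. A sketch::

    from urllib.parse import parse_qs

    def find_hook_urls(raw_userdata):
        # The raw user data is treated as key=value pairs; parse_qs
        # yields every url listed under the hook key.
        return parse_qs(raw_userdata).get("CLOUD_INIT_REMOTE_HOOK", [])

    print(find_hook_urls("CLOUD_INIT_REMOTE_HOOK=http://example.com/hook"))
    # ['http://example.com/hook']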
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index db2a3c79..57b8aa62 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -6,183 +6,63 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-.. _cc_rsyslog:
+"""Rsyslog: Configure system logging via rsyslog"""
-Rsyslog
--------
-**Summary:** configure system logging via rsyslog
+import os
+import re
+from textwrap import dedent
-This module configures remote system logging using rsyslog.
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
-The rsyslog config file to write to can be specified in ``config_filename``,
-which defaults to ``20-cloud-config.conf``. The rsyslog config directory to
-write config files to may be specified in ``config_dir``, which defaults to
-``/etc/rsyslog.d``.
-
-A list of configurations for rsyslog can be specified under the ``configs`` key
-in the ``rsyslog`` config. Each entry in ``configs`` is either a string or a
-dictionary. Each config entry contains a configuration string and a file to
-write it to. For config entries that are a dictionary, ``filename`` sets the
-target filename and ``content`` specifies the config string to write. For
-config entries that are only a string, the string is used as the config string
-to write. If the filename to write the config to is not specified, the value of
-the ``config_filename`` key is used. A file with the selected filename will be
-written inside the directory specified by ``config_dir``.
-
-The command to use to reload the rsyslog service after the config has been
-updated can be specified in ``service_reload_command``. If this is set to
-``auto``, then an appropriate command for the distro will be used. This is the
-default behavior. To manually set the command, use a list of command args (e.g.
-``[systemctl, restart, rsyslog]``).
+MODULE_DESCRIPTION = """\
+This module configures remote system logging using rsyslog.
Configuration for remote servers can be specified in ``configs``, but for
-convenience it can be specified as key value pairs in ``remotes``. Each key
-is the name for an rsyslog remote entry. Each value holds the contents of the
-remote config for rsyslog. The config consists of the following parts:
-
- - filter for log messages (defaults to ``*.*``)
- - optional leading ``@`` or ``@@``, indicating udp and tcp respectively
- (defaults to ``@``, for udp)
- - ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]``
- format, (e.g. ``@[fd00::1]:514``)
- - optional port number (defaults to ``514``)
-
-This module will provide sane defaults for any part of the remote entry that is
-not specified, so in most cases remote hosts can be specified just using
-``<name>: <address>``.
-
-For backwards compatibility, this module still supports legacy names for the
-config entries. Legacy to new mappings are as follows:
-
- - ``rsyslog`` -> ``rsyslog/configs``
- - ``rsyslog_filename`` -> ``rsyslog/config_filename``
- - ``rsyslog_dir`` -> ``rsyslog/config_dir``
-
-.. note::
- The legacy config format does not support specifying
- ``service_reload_command``.
-
-**Internal name:** ``cc_rsyslog``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- rsyslog:
- config_dir: config_dir
- config_filename: config_filename
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- remotes:
- maas: "192.168.1.1"
- juju: "10.0.4.1"
- service_reload_command: [your, syslog, restart, command]
-
-**Legacy config keys**::
-
- rsyslog:
- - "*.* @@192.158.1.1"
- rsyslog_dir: /etc/rsyslog-config.d/
- rsyslog_filename: 99-local.conf
+convenience it can be specified as key value pairs in ``remotes``.
"""
-# Old rsyslog documentation, kept for reference:
-#
-# rsyslog module allows configuration of syslog logging via rsyslog
-# Configuration is done under the cloud-config top level 'rsyslog'.
-#
-# Under 'rsyslog' you can define:
-# - configs: [default=[]]
-# this is a list. entries in it are a string or a dictionary.
-# each entry has 2 parts:
-# * content
-# * filename
-# if the entry is a string, then it is assigned to 'content'.
-# for each entry, content is written to the provided filename.
-# if filename is not provided, its default is read from 'config_filename'
-#
-# Content here can be any valid rsyslog configuration. No format
-# specific format is enforced.
-#
-# For simply logging to an existing remote syslog server, via udp:
-# configs: ["*.* @192.168.1.1"]
-#
-# - remotes: [default={}]
-# This is a dictionary of name / value pairs.
-# In comparison to 'config's, it is more focused in that it only supports
-# remote syslog configuration. It is not rsyslog specific, and could
-# convert to other syslog implementations.
-#
-# Each entry in remotes is a 'name' and a 'value'.
-# * name: an string identifying the entry. good practice would indicate
-# using a consistent and identifiable string for the producer.
-# For example, the MAAS service could use 'maas' as the key.
-# * value consists of the following parts:
-# * optional filter for log messages
-# default if not present: *.*
-# * optional leading '@' or '@@' (indicates udp or tcp respectively).
-# default if not present (udp): @
-# This is rsyslog format for that. if not present, is '@'.
-# * ipv4 or ipv6 or hostname
-# ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
-# * optional port
-# port defaults to 514
-#
-# - config_filename: [default=20-cloud-config.conf]
-# this is the file name to use if none is provided in a config entry.
-#
-# - config_dir: [default=/etc/rsyslog.d]
-# this directory is used for filenames that are not absolute paths.
-#
-# - service_reload_command: [default="auto"]
-# this command is executed if files have been written and thus the syslog
-# daemon needs to be told.
-#
-# Note, since cloud-init 0.5 a legacy version of rsyslog config has been
-# present and is still supported. See below for the mappings between old
-# value and new value:
-# old value -> new value
-# 'rsyslog' -> rsyslog/configs
-# 'rsyslog_filename' -> rsyslog/config_filename
-# 'rsyslog_dir' -> rsyslog/config_dir
-#
-# the legacy config does not support 'service_reload_command'.
-#
-# Example config:
-# #cloud-config
-# rsyslog:
-# configs:
-# - "*.* @@192.158.1.1"
-# - content: "*.* @@192.0.2.1:10514"
-# filename: 01-example.conf
-# - content: |
-# *.* @@syslogd.example.com
-# remotes:
-# maas: "192.168.1.1"
-# juju: "10.0.4.1"
-# config_dir: config_dir
-# config_filename: config_filename
-# service_reload_command: [your, syslog, restart, command]
-#
-# Example Legacy config:
-# #cloud-config
-# rsyslog:
-# - "*.* @@192.158.1.1"
-# rsyslog_dir: /etc/rsyslog-config.d/
-# rsyslog_filename: 99-local.conf
-
-import os
-import re
-
-from cloudinit import log as logging
-from cloudinit import subp, util
+meta: MetaSchema = {
+ "id": "cc_rsyslog",
+ "name": "Rsyslog",
+ "title": "Configure system logging via rsyslog",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ rsyslog:
+ remotes:
+ maas: 192.168.1.1
+ juju: 10.0.4.1
+ service_reload_command: auto
+ """
+ ),
+ dedent(
+ """\
+ rsyslog:
+ config_dir: /opt/etc/rsyslog.d
+ config_filename: 99-late-cloud-config.conf
+ configs:
+ - "*.* @@192.158.1.1"
+ - content: "*.* @@192.0.2.1:10514"
+ filename: 01-example.conf
+ - content: |
+ *.* @@syslogd.example.com
+ remotes:
+ maas: 192.168.1.1
+ juju: 10.0.4.1
+ service_reload_command: [your, syslog, restart, command]
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
@@ -214,12 +94,19 @@ def reload_syslog(distro, command=DEF_RELOAD):
return subp.subp(command, capture=True)
-def load_config(cfg):
- # return an updated config with entries of the correct type
- # support converting the old top level format into new format
+def load_config(cfg: dict) -> dict:
+ """Return an updated config.
+
+ Support converting the old top level format into new format.
+ Raise a `ValueError` if some top level entry has an incorrect type.
+ """
mycfg = cfg.get("rsyslog", {})
if isinstance(cfg.get("rsyslog"), list):
+ LOG.warning(
+ "DEPRECATION: This rsyslog list format is deprecated and will be "
+ "removed in a future version of cloud-init. Use documented keys."
+ )
mycfg = {KEYNAME_CONFIGS: cfg.get("rsyslog")}
if KEYNAME_LEGACY_FILENAME in cfg:
mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
@@ -235,8 +122,13 @@ def load_config(cfg):
)
for key, default, vtypes in fillup:
- if key not in mycfg or not isinstance(mycfg[key], vtypes):
+ if key not in mycfg:
mycfg[key] = default
+ elif not isinstance(mycfg[key], vtypes):
+ raise ValueError(
+ f"Invalid type for key `{key}`. Expected type(s): {vtypes}. "
+ f"Current type: {type(mycfg[key])}"
+ )
return mycfg
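A hedged usage sketch of ``load_config`` above, using this module's key names
and defaults (``configs``, ``config_filename``, ``config_dir``,
``service_reload_command``)::

    legacy = {"rsyslog": ["*.* @@192.158.1.1"],
              "rsyslog_filename": "99-local.conf"}
    cfg = load_config(legacy)
    # cfg["configs"] == ["*.* @@192.158.1.1"]
    # cfg["config_filename"] == "99-local.conf"
    # cfg["config_dir"] == "/etc/rsyslog.d"  (filled from DEF_DIR)

    # With this change, a wrongly-typed entry now raises instead of being
    # silently replaced by its default:
    load_config({"rsyslog": {"configs": "not-a-list"}})  # ValueError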
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index c5206003..7c614f57 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -12,11 +12,7 @@ import os
from textwrap import dedent
from cloudinit import util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -26,36 +22,36 @@ from cloudinit.settings import PER_INSTANCE
# configuration options before actually attempting to deploy with said
# configuration.
-distros = [ALL_DISTROS]
-meta: MetaSchema = {
- "id": "cc_runcmd",
- "name": "Runcmd",
- "title": "Run arbitrary commands",
- "description": dedent(
- """\
- Run arbitrary commands at a rc.local like level with output to the
- console. Each item can be either a list or a string. If the item is a
- list, it will be properly quoted. Each item is written to
- ``/var/lib/cloud/instance/runcmd`` to be later interpreted using
- ``sh``.
+MODULE_DESCRIPTION = """\
+Run arbitrary commands at a rc.local like level with output to the
+console. Each item can be either a list or a string. If the item is a
+list, it will be properly quoted. Each item is written to
+``/var/lib/cloud/instance/runcmd`` to be later interpreted using
+``sh``.
- Note that the ``runcmd`` module only writes the script to be run
- later. The module that actually runs the script is ``scripts-user``
- in the :ref:`Final` boot stage.
+Note that the ``runcmd`` module only writes the script to be run
+later. The module that actually runs the script is ``scripts-user``
+in the :ref:`topics/boot:Final` boot stage.
- .. note::
+.. note::
- all commands must be proper yaml, so you have to quote any characters
- yaml would eat (':' can be problematic)
+ all commands must be proper yaml, so you have to quote any characters
+ yaml would eat (':' can be problematic)
- .. note::
+.. note::
- when writing files, do not use /tmp dir as it races with
- systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """
- ),
- "distros": distros,
+ when writing files, do not use /tmp dir as it races with
+ systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_runcmd",
+ "name": "Runcmd",
+ "title": "Run arbitrary commands",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
"examples": [
dedent(
"""\
@@ -68,29 +64,9 @@ meta: MetaSchema = {
"""
)
],
- "frequency": PER_INSTANCE,
-}
-
-schema = {
- "type": "object",
- "properties": {
- "runcmd": {
- "type": "array",
- "items": {
- "oneOf": [
- {"type": "array", "items": {"type": "string"}},
- {"type": "string"},
- {"type": "null"},
- ]
- },
- "additionalItems": False, # Reject items of non-string non-list
- "additionalProperties": False,
- "minItems": 1,
- }
- },
}
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
@@ -100,7 +76,6 @@ def handle(name, cfg, cloud, log, _args):
)
return
- validate_cloudconfig_schema(cfg, schema)
out_fn = os.path.join(cloud.get_ipath("scripts"), "runcmd")
cmd = cfg["runcmd"]
try:
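A hedged sketch of the quoting behavior described above; cloud-init's own
helper for this is ``util.shellify``, which joins list items into properly
quoted shell lines and passes strings through unchanged::

    from cloudinit import util

    content = util.shellify([
        ["echo", "hello world"],   # becomes: echo 'hello world'
        "ls -l /",                 # written as-is
    ])
    # content is a '#!/bin/sh' script, later executed by cc_scripts_user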
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 0eb46664..df9d4205 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -2,11 +2,17 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Salt Minion
------------
-**Summary:** set up and run salt minion
+"""Salt Minion: Setup and run salt minion"""
+
+import os
+from textwrap import dedent
+
+from cloudinit import safeyaml, subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS, bsd_utils
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module installs, configures and starts salt minion. If the ``salt_minion``
key is present in the config parts, then salt minion will be installed and
started. Configuration for salt minion can be specified in the ``conf`` key
@@ -16,37 +22,45 @@ specified with ``public_key`` and ``private_key`` respectively. Optionally if
you have a custom package name, service name or config directory you can
specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
-**Internal name:** ``cc_salt_minion``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- salt_minion:
- pkg_name: 'salt-minion'
- service_name: 'salt-minion'
- config_dir: '/etc/salt'
- conf:
- master: salt.example.com
- grains:
- role:
- - web
- public_key: |
- ------BEGIN PUBLIC KEY-------
- <key data>
- ------END PUBLIC KEY-------
- private_key: |
- ------BEGIN PRIVATE KEY------
- <key data>
- ------END PRIVATE KEY-------
+Salt keys can be manually generated by: ``salt-key --gen-keys=GEN_KEYS``,
+where ``GEN_KEYS`` is the name of the keypair, e.g. 'minion'. The keypair
+will be copied to ``/etc/salt/pki`` on the minion instance.
"""
-import os
+meta: MetaSchema = {
+ "id": "cc_salt_minion",
+ "name": "Salt Minion",
+ "title": "Setup and run salt minion",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ salt_minion:
+ pkg_name: salt-minion
+ service_name: salt-minion
+ config_dir: /etc/salt
+ conf:
+ master: salt.example.com
+ grains:
+ role:
+ - web
+ public_key: |
+ ------BEGIN PUBLIC KEY-------
+ <key data>
+ ------END PUBLIC KEY-------
+ private_key: |
+ ------BEGIN PRIVATE KEY------
+ <key data>
+ ------END PRIVATE KEY-------
+ pki_dir: /etc/salt/pki/minion
+ """
+ )
+ ],
+}
-from cloudinit import safeyaml, subp, util
-from cloudinit.distros import bsd_utils
+__doc__ = get_meta_doc(meta)
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index b7bfb7aa..aa311d59 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -5,29 +5,34 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Scripts Per Boot: Run per boot scripts"""
-"""
-Scripts Per Boot
-----------------
-**Summary:** run per boot scripts
+import os
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+MODULE_DESCRIPTION = """\
Any scripts in the ``scripts/per-boot`` directory on the datasource will be run
every time the system boots. Scripts will be run in alphabetical order. This
module does not accept any config keys.
-
-**Internal name:** ``cc_scripts_per_boot``
-
-**Module frequency:** always
-
-**Supported distros:** all
"""
-import os
-from cloudinit import subp
-from cloudinit.settings import PER_ALWAYS
+meta: MetaSchema = {
+ "id": "cc_scripts_per_boot",
+ "name": "Scripts Per Boot",
+ "title": "Run per boot scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [],
+}
-frequency = PER_ALWAYS
+__doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "per-boot"
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index ef102b1c..1fb40717 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -5,32 +5,36 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Scripts Per Instance: Run per instance scripts"""
-"""
-Scripts Per Instance
---------------------
-**Summary:** run per instance scripts
+import os
+
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
Any scripts in the ``scripts/per-instance`` directory on the datasource will
be run when a new instance is first booted. Scripts will be run in alphabetical
order. This module does not accept any config keys.
Some cloud platforms change instance-id if a significant change was made to
the system. As a result, per-instance scripts will run again.
-
-**Internal name:** ``cc_scripts_per_instance``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
"""
-import os
+meta: MetaSchema = {
+ "id": "cc_scripts_per_instance",
+ "name": "Scripts Per Instance",
+ "title": "Run per instance scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
-from cloudinit import subp
-from cloudinit.settings import PER_INSTANCE
+__doc__ = get_meta_doc(meta)
-frequency = PER_INSTANCE
SCRIPT_SUBDIR = "per-instance"
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index bf4231e7..d9f406b7 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -5,30 +5,34 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Scripts Per Once: Run one time scripts"""
-"""
-Scripts Per Once
-----------------
-**Summary:** run one time scripts
+import os
+
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ONCE
+frequency = PER_ONCE
+MODULE_DESCRIPTION = """\
Any scripts in the ``scripts/per-once`` directory on the datasource will be run
only once. Changes to the instance will not force a re-run. The only way to
re-run these scripts is to run the clean subcommand and reboot. Scripts will
be run in alphabetical order. This module does not accept any config keys.
-
-**Internal name:** ``cc_scripts_per_once``
-
-**Module frequency:** per once
-
-**Supported distros:** all
"""
-import os
-
-from cloudinit import subp
-from cloudinit.settings import PER_ONCE
-
-frequency = PER_ONCE
+meta: MetaSchema = {
+ "id": "cc_scripts_per_once",
+ "name": "Scripts Per Once",
+ "title": "Run one time scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [],
+}
+
+__doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "per-once"
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index e0d6c560..85375dac 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -5,32 +5,36 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Scripts User: Run user scripts"""
-"""
-Scripts User
-------------
-**Summary:** run user scripts
+import os
+
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module runs all user scripts. User scripts are not specified in the
``scripts`` directory in the datasource, but rather are present in the
``scripts`` dir in the instance configuration. Any cloud-config parts with a
``#!`` will be treated as a script and run. Scripts specified as cloud-config
parts will be run in the order they are specified in the configuration.
This module does not accept any config keys.
-
-**Internal name:** ``cc_scripts_user``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
"""
-import os
+meta: MetaSchema = {
+ "id": "cc_scripts_user",
+ "name": "Scripts User",
+ "title": "Run user scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
-from cloudinit import subp
-from cloudinit.settings import PER_INSTANCE
+__doc__ = get_meta_doc(meta)
-frequency = PER_INSTANCE
SCRIPT_SUBDIR = "scripts"
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index 1b30fa1b..894404f8 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -3,35 +3,59 @@
# Author: Ben Howard <ben.howard@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Scripts Vendor
---------------
-**Summary:** run vendor scripts
-
-Any scripts in the ``scripts/vendor`` directory in the datasource will be run
-when a new instance is first booted. Scripts will be run in alphabetical order.
-Vendor scripts can be run with an optional prefix specified in the ``prefix``
-entry under the ``vendor_data`` config key.
-
-**Internal name:** ``cc_scripts_vendor``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- vendor_data:
- prefix: <vendor data prefix>
-"""
+"""Scripts Vendor: Run vendor scripts"""
import os
+from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
+MODULE_DESCRIPTION = """\
+On select Datasources, vendors can supply any supported user data
+type through a special channel called vendor data. Any scripts in the
+``scripts/vendor`` directory in the datasource
+will be run when a new instance is first booted. Scripts will be run in
+alphabetical order. This module allows control over the execution of
+vendor data.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_scripts_vendor",
+ "name": "Scripts Vendor",
+ "title": "Run vendor scripts",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ vendor_data:
+ enabled: true
+ prefix: /usr/bin/ltrace
+ """
+ ),
+ dedent(
+ """\
+ vendor_data:
+ enabled: true
+ prefix: [timeout, 30]
+ """
+ ),
+ dedent(
+ """\
+ # Vendor data will not be processed
+ vendor_data:
+ enabled: false
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
SCRIPT_SUBDIR = "vendor"
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 67ba8ef5..b0ffdd15 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -6,73 +6,72 @@
# Author: Scott Moser <scott.moser@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Seed Random: Provide random seed data"""
-"""
-Seed Random
------------
-**Summary:** provide random seed data
+import base64
+import os
+from io import BytesIO
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
-Since all cloud instances started from the same image will produce very similar
-data when they are first booted, as they are all starting with the same seed
+MODULE_DESCRIPTION = """\
+All cloud instances started from the same image will produce very similar
+data when they are first booted, as they are all starting with the same seed
for the kernel's entropy keyring. To avoid this, random seed data can be
provided to the instance either as a string or by specifying a command to run
to generate the data.
-Configuration for this module is under the ``random_seed`` config key. The
-``file`` key specifies the path to write the data to, defaulting to
-``/dev/urandom``. Data can be passed in directly with ``data``, and may
-optionally be specified in encoded form, with the encoding specified in
-``encoding``.
-
-If the cloud provides its own random seed data, it will be appended to ``data``
+Configuration for this module is under the ``random_seed`` config key. If
+the cloud provides its own random seed data, it will be appended to ``data``
before it is written to ``file``.
-.. note::
- when using a multiline value for ``data`` or specifying binary data, be
- sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format
- specifiers when appropriate
-
If the ``command`` key is specified, the given command will be executed. This
will happen after ``file`` has been populated. That command's environment will
contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is
specified that cannot be run, no error will be reported unless
``command_required`` is set to true.
-
-For example, to use ``pollinate`` to gather data from a
-remote entropy server and write it to ``/dev/urandom``, the following could be
-used::
-
- random_seed:
- file: /dev/urandom
- command: ["pollinate", "--server=http://local.polinate.server"]
- command_required: true
-
-**Internal name:** ``cc_seed_random``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- random_seed:
- file: <file>
- data: <random string>
- encoding: <raw/base64/b64/gzip/gz>
- command: [<cmd name>, <arg1>, <arg2>...]
- command_required: <true/false>
"""
-import base64
-import os
-from io import BytesIO
-
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-LOG = logging.getLogger(__name__)
+meta: MetaSchema = {
+ "id": "cc_seed_random",
+ "name": "Seed Random",
+ "title": "Provide random seed data",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ random_seed:
+ file: /dev/urandom
+ data: my random string
+ encoding: raw
+ command: ['sh', '-c', 'dd if=/dev/urandom of=$RANDOM_SEED_FILE']
+ command_required: true
+ """
+ ),
+ dedent(
+ """\
+ # To use 'pollinate' to gather data from a remote entropy
+ # server and write it to '/dev/urandom', the following
+ # could be used:
+ random_seed:
+ file: /dev/urandom
+ command: ["pollinate", "--server=http://local.polinate.server"]
+ command_required: true
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def _decode(data, encoding=None):
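A hedged sketch of what ``_decode`` does with the documented ``encoding``
values (``raw``, ``base64``/``b64``, ``gzip``/``gz``); this mirrors the
function's semantics rather than reproducing its body verbatim::

    import base64

    from cloudinit import util

    def decode_seed(data: str, encoding=None) -> bytes:
        if not data:
            return b""
        if not encoding or encoding.lower() == "raw":
            return data.encode()
        if encoding.lower() in ("base64", "b64"):
            return base64.b64decode(data)
        if encoding.lower() in ("gzip", "gz"):
            return util.decomp_gzip(data, quiet=False, decode=None)
        raise IOError("Unknown random_seed encoding: %s" % encoding)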
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index eb0ca328..a5b989d0 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -5,24 +5,36 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Set Hostname: Set hostname and FQDN"""
-"""
-Set Hostname
-------------
-**Summary:** set hostname and fqdn
-
-This module handles setting the system hostname and fqdn. If
-``preserve_hostname`` is set, then the hostname will not be altered.
+import os
+from textwrap import dedent
-A hostname and fqdn can be provided by specifying a full domain name under the
-``fqdn`` key. Alternatively, a hostname can be specified using the ``hostname``
-key, and the fqdn of the cloud wil be used. If a fqdn specified with the
+from cloudinit import util
+from cloudinit.atomic_helper import write_json
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+MODULE_DESCRIPTION = """\
+This module handles setting the system hostname and fully qualified domain
+name (FQDN). If ``preserve_hostname`` is set, then the hostname will not be
+altered.
+
+A hostname and FQDN can be provided by specifying a full domain name under the
+``fqdn`` key. Alternatively, a hostname can be specified using the ``hostname``
+key, and the FQDN of the cloud will be used. If an FQDN is specified with the
``hostname`` key, it will be handled properly, although it is better to use
the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set,
-it is distro dependent whether ``hostname`` or ``fqdn`` is used,
-unless the ``prefer_fqdn_over_hostname`` option is true and fqdn is set
-it will force the use of FQDN in all distros, and if false then it will
-force the hostname use.
+the ``prefer_fqdn_over_hostname`` will force the use of FQDN in all distros
+when true, and when false it will force the short hostname. Otherwise, the
+hostname to use is distro-dependent.
+
+.. note::
+ cloud-init performs no hostname input validation before sending the
+ hostname to distro-specific tools, and most tools will not accept a
+ trailing dot on the FQDN.
This module will run in the init-local stage before networking is configured
if the hostname is set by metadata or user data on the local system.
@@ -31,25 +43,28 @@ This will occur on datasources like nocloud and ovf where metadata and user
data are available locally. This ensures that the desired hostname is applied
before any DHCP requests are performed on these platforms where dynamic DNS is
based on initial hostname.
-
-**Internal name:** ``cc_set_hostname``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- preserve_hostname: <true/false>
- prefer_fqdn_over_hostname: <true/false>
- fqdn: <fqdn>
- hostname: <fqdn/hostname>
"""
-import os
-
-from cloudinit import util
-from cloudinit.atomic_helper import write_json
+meta: MetaSchema = {
+ "id": "cc_set_hostname",
+ "name": "Set Hostname",
+ "title": "Set hostname and FQDN",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": frequency,
+ "examples": [
+ "preserve_hostname: true",
+ dedent(
+ """\
+ hostname: myhost
+ fqdn: myhost.example.com
+ prefer_fqdn_over_hostname: true
+ """
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
class SetHostnameError(Exception):
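A hedged sketch of the hostname-vs-FQDN selection described above (the module
delegates the real lookup to ``util.get_hostname_fqdn``; this is illustrative
only)::

    def pick_hostname(hostname, fqdn, prefer_fqdn=None):
        """Return the name handed to the distro's hostname tooling."""
        if prefer_fqdn is True and fqdn:
            return fqdn
        if prefer_fqdn is False and hostname:
            return hostname
        # with no preference set, behavior is distro-dependent
        return fqdn or hostname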
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index d8df8e23..3c8b378b 100755..100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -5,85 +5,73 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""Set Passwords: Set user passwords and enable/disable SSH password auth"""
-"""
-Set Passwords
--------------
-**Summary:** Set user passwords and enable/disable SSH password authentication
+import re
+from string import ascii_letters, digits
+from textwrap import dedent
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS, Distro, ug_util
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.ssh_util import update_ssh_config
+
+MODULE_DESCRIPTION = """\
This module consumes three top-level config keys: ``ssh_pwauth``, ``chpasswd``
and ``password``.
The ``ssh_pwauth`` config key determines whether or not sshd will be configured
-to accept password authentication. True values will enable password auth,
-false values will disable password auth, and the literal string ``unchanged``
-will leave it unchanged. Setting no value will also leave the current setting
-on-disk unchanged.
+to accept password authentication.
The ``chpasswd`` config key accepts a dictionary containing either or both of
-``expire`` and ``list``.
-
-If the ``list`` key is provided, it should contain a list of
-``username:password`` pairs. This can be either a YAML list (of strings), or a
-multi-line string with one pair per line. Each user will have the
-corresponding password set. A password can be randomly generated by specifying
-``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool
-like ``mkpasswd``, can be specified; a regex
-(``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value
-should be treated as a hash.
-
-.. note::
- The users specified must already exist on the system. Users will have been
- created by the ``cc_users_groups`` module at this point.
-
-By default, all users on the system will have their passwords expired (meaning
-that they will have to be reset the next time the user logs in). To disable
-this behaviour, set ``expire`` under ``chpasswd`` to a false value.
-
-If a ``list`` of user/password pairs is not specified under ``chpasswd``, then
-the value of the ``password`` config key will be used to set the default user's
-password.
-
-**Internal name:** ``cc_set_passwords``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- ssh_pwauth: <yes/no/unchanged>
-
- password: password1
- chpasswd:
- expire: <true/false>
-
- chpasswd:
- list: |
- user1:password1
- user2:RANDOM
- user3:password3
- user4:R
-
- ##
- # or as yaml list
- ##
- chpasswd:
- list:
- - user1:password1
- - user2:RANDOM
- - user3:password3
- - user4:R
- - user4:$6$rL..$ej...
-"""
+``list`` and ``expire``. The ``list`` key is used to assign a password to a
+corresponding pre-existing user. The ``expire`` key is used to set
+whether to expire all user passwords such that a password will need to be reset
+on the user's next login.
-import re
-from string import ascii_letters, digits
+The ``password`` config key is used to set the default user's password. It is
+ignored if the ``chpasswd`` ``list`` is used.
+"""
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.distros import ug_util
-from cloudinit.ssh_util import update_ssh_config
+meta: MetaSchema = {
+ "id": "cc_set_passwords",
+ "name": "Set Passwords",
+ "title": "Set user passwords and enable/disable SSH password auth",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ # Set a default password that would need to be changed
+ # at first login
+ ssh_pwauth: true
+ password: password1
+ """
+ ),
+ dedent(
+ """\
+ # Disable ssh password authentication
+ # Don't require users to change their passwords on next login
+ # Set the password for user1 to be 'password1' (OS does hashing)
+ # Set the password for user2 to be a randomly generated password,
+ # which will be written to the system console
+ # Set the password for user3 to a pre-hashed password
+ ssh_pwauth: false
+ chpasswd:
+ expire: false
+ list:
+ - user1:password1
+ - user2:RANDOM
+ - user3:$6$rounds=4096$5DJ8a9WMTEzIo5J4$Yms6imfeBvf3Yfu84mQBerh18l7OR1Wm1BJXZqFSpJ6BVas0AYJqIjP7czkOaAZHZi1kxQ5Y1IhgWN8K9NgxR1
+ """ # noqa
+ ),
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
@@ -91,7 +79,7 @@ LOG = logging.getLogger(__name__)
PW_SET = "".join([x for x in ascii_letters + digits if x not in "loLOI01"])
-def handle_ssh_pwauth(pw_auth, distro):
+def handle_ssh_pwauth(pw_auth, distro: Distro):
"""Apply sshd PasswordAuthentication changes.
@param pw_auth: config setting from 'pw_auth'.
@@ -99,8 +87,57 @@ def handle_ssh_pwauth(pw_auth, distro):
@param distro: an instance of the distro class for the target distribution
@return: None"""
+ service = distro.get_option("ssh_svcname", "ssh")
+ restart_ssh = True
+ try:
+ distro.manage_service("status", service)
+ except subp.ProcessExecutionError as e:
+ uses_systemd = distro.uses_systemd()
+ if not uses_systemd:
+ LOG.debug(
+ "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
+ " will not be restarted because it is not running or not"
+ " available.",
+ pw_auth,
+ service,
+ )
+ restart_ssh = False
+ elif e.exit_code == 3:
+ # Service is not running. Write ssh config.
+ LOG.debug(
+ "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
+ " will not be restarted because it is stopped.",
+ pw_auth,
+ service,
+ )
+ restart_ssh = False
+ elif e.exit_code == 4:
+ # Service status is unknown
+ LOG.warning(
+ "Ignoring config 'ssh_pwauth: %s'."
+ " SSH service '%s' is not installed.",
+ pw_auth,
+ service,
+ )
+ return
+ else:
+ LOG.warning(
+ "Ignoring config 'ssh_pwauth: %s'."
+ " SSH service '%s' is not available. Error: %s.",
+ pw_auth,
+ service,
+ e,
+ )
+ return
+
cfg_name = "PasswordAuthentication"
+ if isinstance(pw_auth, str):
+ LOG.warning(
+ "DEPRECATION: The 'ssh_pwauth' config key should be set to "
+ "a boolean value. The string format is deprecated and will be "
+ "removed in a future version of cloud-init."
+ )
if util.is_true(pw_auth):
cfg_val = "yes"
elif util.is_false(pw_auth):
@@ -118,8 +155,11 @@ def handle_ssh_pwauth(pw_auth, distro):
LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
- distro.manage_service("restart", distro.get_option("ssh_svcname", "ssh"))
- LOG.debug("Restarted the SSH daemon.")
+ if restart_ssh:
+ distro.manage_service("restart", service)
+ LOG.debug("Restarted the SSH daemon.")
+ else:
+ LOG.debug("Not restarting SSH service: service is stopped.")
def handle(_name, cfg, cloud, log, args):
@@ -141,6 +181,11 @@ def handle(_name, cfg, cloud, log, args):
log.debug("Handling input for chpasswd as list.")
plist = util.get_cfg_option_list(chfg, "list", plist)
else:
+ log.warning(
+ "DEPRECATION: The chpasswd multiline string format is "
+ "deprecated and will be removed from a future version of "
+ "cloud-init. Use the list format instead."
+ )
log.debug("Handling input for chpasswd as multiline string.")
plist = util.get_cfg_option_str(chfg, "list", plist)
if plist:
@@ -227,7 +272,7 @@ def handle(_name, cfg, cloud, log, args):
handle_ssh_pwauth(cfg.get("ssh_pwauth"), cloud.distro)
if len(errors):
- log.debug("%s errors occured, re-raising the last one", len(errors))
+ log.debug("%s errors occurred, re-raising the last one", len(errors))
raise errors[-1]
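A hedged sketch of how entries in the ``list`` are classified (the hash
detection regex below is the one quoted in the removed documentation above)::

    import re

    HASH_RE = re.compile(r"\$(1|2a|2y|5|6)(\$.+){2}")

    for pair in ["user1:password1", "user2:RANDOM", "user3:$6$salt$hashhash"]:
        user, _, value = pair.partition(":")
        if value in ("RANDOM", "R"):
            kind = "randomly generated, written to the console"
        elif HASH_RE.match(value):
            kind = "pre-hashed, applied as-is"
        else:
            kind = "plain text, hashed by the OS"
        print(user, "->", kind)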
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 9f343df0..41a6adf9 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -9,11 +9,7 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
@@ -54,10 +50,6 @@ meta: MetaSchema = {
best to create a snap seed directory and seed.yaml manifest in
**/var/lib/snapd/seed/** which snapd automatically installs on
startup.
-
- **Development only**: The ``squashfuse_in_container`` boolean can be
- set true to install squashfuse package when in a container to enable
- snap installs. Default is false.
"""
),
"distros": distros,
@@ -78,94 +70,44 @@ meta: MetaSchema = {
),
dedent(
"""\
- # LXC-based containers require squashfuse before snaps can be installed
- snap:
- commands:
- 00: apt-get install squashfuse -y
- 11: snap install emoj
-
- """
- ),
- dedent(
- """\
# Convenience: the snap command can be omitted when specifying commands
# as a list and 'snap' will automatically be prepended.
# The following commands are equivalent:
snap:
- commands:
- 00: ['install', 'vlc']
- 01: ['snap', 'install', 'vlc']
- 02: snap install vlc
- 03: 'snap install vlc'
+ commands:
+ 00: ['install', 'vlc']
+ 01: ['snap', 'install', 'vlc']
+ 02: snap install vlc
+ 03: 'snap install vlc'
"""
),
dedent(
"""\
# You can use a list of commands
snap:
- commands:
- - ['install', 'vlc']
- - ['snap', 'install', 'vlc']
- - snap install vlc
- - 'snap install vlc'
+ commands:
+ - ['install', 'vlc']
+ - ['snap', 'install', 'vlc']
+ - snap install vlc
+ - 'snap install vlc'
"""
),
dedent(
"""\
# You can use a list of assertions
snap:
- assertions:
- - signed_assertion_blob_here
- - |
- signed_assertion_blob_here
+ assertions:
+ - signed_assertion_blob_here
+ - |
+ signed_assertion_blob_here
"""
),
],
"frequency": PER_INSTANCE,
}
-schema = {
- "type": "object",
- "properties": {
- "snap": {
- "type": "object",
- "properties": {
- "assertions": {
- "type": ["object", "array"], # Array of strings or dict
- "items": {"type": "string"},
- "additionalItems": False, # Reject items non-string
- "minItems": 1,
- "minProperties": 1,
- "uniqueItems": True,
- "additionalProperties": {"type": "string"},
- },
- "commands": {
- "type": ["object", "array"], # Array of strings or dict
- "items": {
- "oneOf": [
- {"type": "array", "items": {"type": "string"}},
- {"type": "string"},
- ]
- },
- "additionalItems": False, # Reject non-string & non-list
- "minItems": 1,
- "minProperties": 1,
- "additionalProperties": {
- "oneOf": [
- {"type": "string"},
- {"type": "array", "items": {"type": "string"}},
- ],
- },
- },
- "squashfuse_in_container": {"type": "boolean"},
- },
- "additionalProperties": False, # Reject keys not in schema
- "minProperties": 1,
- }
- },
-}
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
SNAP_CMD = "snap"
ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
@@ -240,23 +182,6 @@ def run_commands(commands):
raise RuntimeError(msg)
-# RELEASE_BLOCKER: Once LP: #1628289 is released on xenial, drop this function.
-def maybe_install_squashfuse(cloud):
- """Install squashfuse if we are in a container."""
- if not util.is_container():
- return
- try:
- cloud.distro.update_package_sources()
- except Exception:
- util.logexc(LOG, "Package update failed")
- raise
- try:
- cloud.distro.install_packages(["squashfuse"])
- except Exception:
- util.logexc(LOG, "Failed to install squashfuse")
- raise
-
-
def handle(name, cfg, cloud, log, args):
cfgin = cfg.get("snap", {})
if not cfgin:
@@ -265,9 +190,6 @@ def handle(name, cfg, cloud, log, args):
)
return
- validate_cloudconfig_schema(cfg, schema)
- if util.is_true(cfgin.get("squashfuse_in_container", False)):
- maybe_install_squashfuse(cloud)
add_assertions(cfgin.get("assertions", []))
run_commands(cfgin.get("commands", []))
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 3fa6c388..6820a816 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -1,10 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Spacewalk: Install and configure spacewalk"""
-"""
-Spacewalk
----------
-**Summary:** install and configure spacewalk
+from textwrap import dedent
+
+from cloudinit import subp
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module installs spacewalk and applies basic configuration. If the
``spacewalk`` config key is present spacewalk will be installed. The server to
connect to after installation must be provided in the ``server`` in spacewalk
configuration. A proxy to connect through and an activation key may optionally
be specified.
For more information about spacewalk see: https://fedorahosted.org/spacewalk/
+"""
-**Internal name:** ``cc_spacewalk``
-
-**Module frequency:** per instance
-
-**Supported distros:** redhat, fedora
+meta: MetaSchema = {
+ "id": "cc_spacewalk",
+ "name": "Spacewalk",
+ "title": "Install and configure spacewalk",
+ "description": MODULE_DESCRIPTION,
+ "distros": ["rhel", "fedora"],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ spacewalk:
+ server: <url>
+ proxy: <proxy host>
+ activation_key: <key>
+ """
+ )
+ ],
+}
-**Config keys**::
+__doc__ = get_meta_doc(meta)
- spacewalk:
- server: <url>
- proxy: <proxy host>
- activation_key: <key>
-"""
-
-from cloudinit import subp
distros = ["redhat", "fedora"]
required_packages = ["rhn-setup"]
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 64486b9c..33c1fd0c 100755..100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -5,12 +5,23 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""SSH: Configure SSH and SSH keys"""
-"""
-SSH
----
-**Summary:** configure SSH and SSH keys (host and authorized)
+import glob
+import os
+import re
+import sys
+from logging import Logger
+from textwrap import dedent
+from typing import List, Optional, Sequence
+
+from cloudinit import ssh_util, subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS, ug_util
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module handles most configuration for SSH and both host and authorized SSH
keys.
@@ -28,12 +39,7 @@ should be specified as a list of public keys.
password authentication
Root login can be enabled/disabled using the ``disable_root`` config key. Root
-login options can be manually specified with ``disable_root_opts``. If
-``disable_root_opts`` is specified and contains the string ``$USER``,
-it will be replaced with the username of the default user. By default,
-root login is disabled, and root login opts are set to::
-
- no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+login options can be manually specified with ``disable_root_opts``.
Supported public key types for the ``ssh_authorized_keys`` are:
@@ -75,32 +81,18 @@ Host Keys
^^^^^^^^^
Host keys are for authenticating a specific instance. Many images have default
-host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents
-re-use of a private host key from an image on multiple machines. Since
-removing default host keys is usually the desired behavior this option is
-enabled by default.
-
-Host keys can be added using the ``ssh_keys`` configuration key. The argument
-to this config key should be a dictionary entries for the public and private
-keys of each desired key type. Entries in the ``ssh_keys`` config dict should
-have keys in the format ``<key type>_private``, ``<key type>_public``, and,
-optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``,
-``rsa_public: <key>``, and ``rsa_certificate: <key>``. See below for supported
-key types. Not all key types have to be specified, ones left unspecified will
-not be used. If this config option is used, then no keys will be generated.
+host SSH keys, which can be removed using ``ssh_deletekeys``.
+
+Host keys can be added using the ``ssh_keys`` configuration key.
When host keys are generated the output of the ssh-keygen command(s) can be
displayed on the console using the ``ssh_quiet_keygen`` configuration key.
-This settings defaults to False which displays the keygen output.
.. note::
when specifying private host keys in cloud-config, care should be taken to
ensure that the communication between the data source and the instance is
secure
-.. note::
- to specify multiline private host keys and certificates, use yaml
- multiline syntax
If no host keys are specified using ``ssh_keys``, then keys will be generated
using ``ssh-keygen``. By default one public/private pair of each supported
@@ -113,59 +105,74 @@ system (i.e. if ``ssh_deletekeys`` was false), no key will be generated.
Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
config flags are:
- - rsa
- dsa
- ecdsa
- ed25519
+ - rsa
+
+Unsupported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
+config flags are:
-**Internal name:** ``cc_ssh``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- ssh_deletekeys: <true/false>
- ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
- ...
- -----END RSA PRIVATE KEY-----
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
- rsa_certificate: |
- ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
- ...
- -----END DSA PRIVATE KEY-----
- dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
- dsa_certificate: |
- ssh-dsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
-
- ssh_genkeytypes: <key type>
- disable_root: <true/false>
- disable_root_opts: <disable root options string>
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
- allow_public_ssh_keys: <true/false>
- ssh_publish_hostkeys:
- enabled: <true/false> (Defaults to true)
- blacklist: <list of key types> (Defaults to [dsa])
- ssh_quiet_keygen: <true/false>
+ - ecdsa-sk
+ - ed25519-sk
"""
-import glob
-import os
-import sys
+# Note: We do not support *-sk key types because:
+# 1) In the autogeneration case, user interaction with the device is needed,
+# which does not fit a cloud context.
+# 2) These types of keys are user keys, not host keys.
+
+
+meta: MetaSchema = {
+ "id": "cc_ssh",
+ "name": "SSH",
+ "title": "Configure SSH and SSH keys",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ ssh_keys:
+ rsa_private: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+ ...
+ -----END RSA PRIVATE KEY-----
+ rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+ rsa_certificate: |
+ ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
+ dsa_private: |
+ -----BEGIN DSA PRIVATE KEY-----
+ MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+ ...
+ -----END DSA PRIVATE KEY-----
+ dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+ dsa_certificate: |
+ ssh-dsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
+ - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
+ ssh_deletekeys: true
+ ssh_genkeytypes: [rsa, dsa, ecdsa, ed25519]
+ disable_root: true
+ disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+ allow_public_ssh_keys: true
+ ssh_quiet_keygen: true
+ ssh_publish_hostkeys:
+ enabled: true
+ blacklist: [dsa]
+ """ # noqa: E501
+ )
+ ],
+}
-from cloudinit import ssh_util, subp, util
-from cloudinit.distros import ug_util
+__doc__ = get_meta_doc(meta)
GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"]
+pattern_unsupported_config_keys = re.compile(
+ "^(ecdsa-sk|ed25519-sk)_(private|public|certificate)$"
+)
KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key"
PUBLISH_HOST_KEYS = True
# Don't publish the dsa hostkey by default since OpenSSH recommends not using
@@ -175,19 +182,19 @@ HOST_KEY_PUBLISH_BLACKLIST = ["dsa"]
CONFIG_KEY_TO_FILE = {}
PRIV_TO_PUB = {}
for k in GENERATE_KEY_NAMES:
- CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
- CONFIG_KEY_TO_FILE.update(
- {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)}
- )
CONFIG_KEY_TO_FILE.update(
- {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)}
+ {
+ f"{k}_private": (KEY_FILE_TPL % k, 0o600),
+ f"{k}_public": (f"{KEY_FILE_TPL % k}.pub", 0o600),
+ f"{k}_certificate": (f"{KEY_FILE_TPL % k}-cert.pub", 0o600),
+ }
)
- PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
+ PRIV_TO_PUB[f"{k}_private"] = f"{k}_public"
KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
-def handle(_name, cfg, cloud, log, _args):
+def handle(_name, cfg, cloud: Cloud, log: Logger, _args):
# remove the static keys from the pristine image
if cfg.get("ssh_deletekeys", True):
@@ -201,8 +208,12 @@ def handle(_name, cfg, cloud, log, _args):
if "ssh_keys" in cfg:
# if there are keys and/or certificates in cloud-config, use them
for (key, val) in cfg["ssh_keys"].items():
- # skip entry if unrecognized
if key not in CONFIG_KEY_TO_FILE:
+ if pattern_unsupported_config_keys.match(key):
+ reason = "unsupported"
+ else:
+ reason = "unrecognized"
+ log.warning("Skipping %s ssh_keys" ' entry: "%s"', reason, key)
continue
tgt_fn = CONFIG_KEY_TO_FILE[key][0]
tgt_perms = CONFIG_KEY_TO_FILE[key][1]
@@ -307,7 +318,7 @@ def handle(_name, cfg, cloud, log, _args):
cfg, "disable_root_opts", ssh_util.DISABLE_USER_OPTS
)
- keys = []
+ keys: List[str] = []
if util.get_cfg_option_bool(cfg, "allow_public_ssh_keys", True):
keys = cloud.get_public_ssh_keys() or []
else:
@@ -342,7 +353,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
ssh_util.setup_user_keys(keys, "root", options=key_prefix)
-def get_public_host_keys(blacklist=None):
+def get_public_host_keys(blacklist: Optional[Sequence[str]] = None):
"""Read host keys from /etc/ssh/*.pub files and return them as a list.
@param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa']
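Given the loop above, a sketch of the resulting mapping for one key type, plus
the new *-sk rejection pattern in action::

    # CONFIG_KEY_TO_FILE["rsa_private"]
    #   == ("/etc/ssh/ssh_host_rsa_key", 0o600)
    # CONFIG_KEY_TO_FILE["rsa_certificate"]
    #   == ("/etc/ssh/ssh_host_rsa_key-cert.pub", 0o600)

    import re

    pattern = re.compile(r"^(ecdsa-sk|ed25519-sk)_(private|public|certificate)$")
    assert pattern.match("ed25519-sk_private")   # unsupported -> warned
    assert not pattern.match("ed25519_private")  # supported key type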
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 020c3469..db5c1454 100755..100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -3,34 +3,37 @@
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+"""SSH AuthKey Fingerprints: Log fingerprints of user SSH keys"""
-"""
-SSH Authkey Fingerprints
-------------------------
-**Summary:** log fingerprints of user SSH keys
+import base64
+import hashlib
+
+from cloudinit import ssh_util, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS, ug_util
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.simpletable import SimpleTable
+MODULE_DESCRIPTION = """\
Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
the keys can be specified, but defaults to ``sha256``.
-
-**Internal name:** ``cc_ssh_authkey_fingerprints``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- no_ssh_fingerprints: <true/false>
- authkey_hash: <hash type>
"""
-import base64
-import hashlib
+meta: MetaSchema = {
+ "id": "cc_ssh_authkey_fingerprints",
+ "name": "SSH AuthKey Fingerprints",
+ "title": "Log fingerprints of user SSH keys",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ "no_ssh_fingerprints: true",
+ "authkey_hash: sha512",
+ ],
+}
-from cloudinit import ssh_util, util
-from cloudinit.distros import ug_util
-from cloudinit.simpletable import SimpleTable
+__doc__ = get_meta_doc(meta)
def _split_hash(bin_hash):
@@ -119,6 +122,14 @@ def handle(name, cfg, cloud, log, _args):
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
+ if _cfg.get("no_create_home") or _cfg.get("system"):
+ log.debug(
+ "Skipping printing of ssh fingerprints for user '%s' because "
+ "no home directory is created",
+ user_name,
+ )
+ continue
+
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
_pprint_key_entries(user_name, key_fn, key_entries, hash_meth)
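A hedged sketch of the fingerprint formatting this module produces (the real
splitting lives in ``_split_hash`` above; the key material here is fabricated
so the snippet stays self-contained)::

    import base64
    import hashlib

    blob = base64.b64decode(base64.b64encode(b"example key material"))
    digest = hashlib.new("sha256", blob).hexdigest()
    fingerprint = ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))
    # e.g. '71:8b:...:0c' - colon-separated byte pairs of the digest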
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index a9575c59..6a15895d 100755..100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -5,42 +5,64 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-SSH Import Id
--------------
-**Summary:** import SSH id
-
-This module imports SSH keys from either a public keyserver, usually launchpad
-or github using ``ssh-import-id``. Keys are referenced by the username they are
-associated with on the keyserver. The keyserver can be specified by prepending
-either ``lp:`` for launchpad or ``gh:`` for github to the username.
-
-**Internal name:** ``cc_ssh_import_id``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
-
- ssh_import_id:
- - user
- - gh:user
- - lp:user
-"""
+"""SSH Import ID: Import SSH id"""
import pwd
+from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ug_util
+from cloudinit.settings import PER_INSTANCE
# https://launchpad.net/ssh-import-id
distros = ["ubuntu", "debian"]
+SSH_IMPORT_ID_BINARY = "ssh-import-id"
+MODULE_DESCRIPTION = """\
+This module imports SSH keys from either a public keyserver, usually launchpad
+or github using ``ssh-import-id``. Keys are referenced by the username they are
+associated with on the keyserver. The keyserver can be specified by prepending
+either ``lp:`` for launchpad or ``gh:`` for github to the username.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_ssh_import_id",
+ "name": "SSH Import ID",
+ "title": "Import SSH id",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "frequency": PER_INSTANCE,
+ "examples": [
+ dedent(
+ """\
+ ssh_import_id:
+ - user
+ - gh:user
+ - lp:user
+ """
+ )
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
+
def handle(_name, cfg, cloud, log, args):
+ if not is_key_in_nested_dict(cfg, "ssh_import_id"):
+ log.debug(
+ "Skipping module named ssh-import-id, no 'ssh_import_id'"
+ " directives found."
+ )
+ return
+ elif not subp.which(SSH_IMPORT_ID_BINARY):
+ log.warn(
+ "ssh-import-id is not installed, but module ssh_import_id is "
+ "configured. Skipping module."
+ )
+ return
+
# import for "user: XXXXX"
if len(args) != 0:
user = args[0]
@@ -100,7 +122,38 @@ def import_ssh_ids(ids, user, log):
except KeyError as exc:
raise exc
- cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
+ # TODO: We have a use case that involves setting a proxy value earlier
+ # in boot and the user wants this env used when using ssh-import-id.
+ # E.g.,:
+ # bootcmd:
+ # - mkdir -p /etc/systemd/system/cloud-config.service.d
+ # - mkdir -p /etc/systemd/system/cloud-final.service.d
+ # write_files:
+ # - content: |
+ # http_proxy=http://192.168.1.2:3128/
+ # https_proxy=http://192.168.1.2:3128/
+ # path: /etc/cloud/env
+ # - content: |
+ # [Service]
+ # EnvironmentFile=/etc/cloud/env
+ # PassEnvironment=https_proxy http_proxy
+ # path: /etc/systemd/system/cloud-config.service.d/override.conf
+ # - content: |
+ # [Service]
+ # EnvironmentFile=/etc/cloud/env
+ # PassEnvironment=https_proxy http_proxy
+ # path: /etc/systemd/system/cloud-final.service.d/override.conf
+ #
+ # I'm including the `--preserve-env` here as a one-off, but we should
+ # have a better way of setting env earlier in boot and using it later.
+ # Perhaps a 'set_env' module?
+ cmd = [
+ "sudo",
+ "--preserve-env=https_proxy",
+ "-Hu",
+ user,
+ SSH_IMPORT_ID_BINARY,
+ ] + ids
log.debug("Importing SSH ids for user %s.", user)
try:
@@ -110,4 +163,23 @@ def import_ssh_ids(ids, user, log):
raise exc
-# vi: ts=4 expandtab
+def is_key_in_nested_dict(config: dict, search_key: str) -> bool:
+ """Search for key nested in config.
+
+ Note: A dict embedded in a list of lists will not be found by this walk -
+ but in this case we don't need it.
+ """
+ for config_key in config.keys():
+ if search_key == config_key:
+ return True
+ if isinstance(config[config_key], dict):
+ if is_key_in_nested_dict(config[config_key], search_key):
+ return True
+ if isinstance(config[config_key], list):
+ # this code could probably be generalized to walking the whole
+ # config by iterating lists in search of dictionaries
+ for item in config[config_key]:
+ if isinstance(item, dict):
+ if is_key_in_nested_dict(item, search_key):
+ return True
+ return False
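
A minimal standalone sketch of how the recursive lookup above behaves; the
``config`` fragment here is hypothetical user-data, not taken from this patch:

    def is_key_in_nested_dict(config: dict, search_key: str) -> bool:
        # Same walk as the patched helper: match at this level, then
        # recurse into dict values and into dicts inside list values.
        for config_key, value in config.items():
            if config_key == search_key:
                return True
            if isinstance(value, dict):
                if is_key_in_nested_dict(value, search_key):
                    return True
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, dict):
                        if is_key_in_nested_dict(item, search_key):
                            return True
        return False

    config = {"users": [{"name": "bob", "ssh_import_id": ["gh:bob"]}]}
    print(is_key_in_nested_dict(config, "ssh_import_id"))  # True
    print(is_key_in_nested_dict(config, "chpasswd"))       # False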
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index 24e6099e..47da2d06 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -5,31 +5,30 @@
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Timezone
---------
-**Summary:** set system timezone
-
-Set the system timezone. If any args are passed to the module then the first
-will be used for the timezone. Otherwise, the module will attempt to retrieve
-the timezone from cloud config.
-
-**Internal name:** ``cc_timezone``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- timezone: <timezone>
-"""
+"""Timezone: Set the system timezone"""
from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
+MODULE_DESCRIPTION = """\
+Sets the system timezone based on the value provided.
+"""
+
+meta: MetaSchema = {
+ "id": "cc_timezone",
+ "name": "Timezone",
+ "title": "Set the system timezone",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ "timezone: US/Eastern",
+ ],
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, args):
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index e469bb22..57763c31 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -6,11 +6,7 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
UA_URL = "https://ubuntu.com/advantage"
@@ -32,7 +28,7 @@ meta: MetaSchema = {
Note that when enabling FIPS or FIPS updates you will need to schedule
a reboot to ensure the machine is running the FIPS-compliant kernel.
- See :ref:`Power State Change` for information on how to configure
+ See `Power State Change`_ for information on how to configure
cloud-init to perform this reboot.
"""
),
@@ -53,7 +49,7 @@ meta: MetaSchema = {
# only fips and esm services. Services will only be enabled if
# the environment supports said service. Otherwise warnings will
# be logged for incompatible services specified.
- ubuntu-advantage:
+ ubuntu_advantage:
token: <ua_contract_token>
enable:
- fips
@@ -67,7 +63,7 @@ meta: MetaSchema = {
# completed.
power_state:
mode: reboot
- ubuntu-advantage:
+ ubuntu_advantage:
token: <ua_contract_token>
enable:
- fips
@@ -77,29 +73,7 @@ meta: MetaSchema = {
"frequency": PER_INSTANCE,
}
-schema = {
- "type": "object",
- "properties": {
- "ubuntu_advantage": {
- "type": "object",
- "properties": {
- "enable": {
- "type": "array",
- "items": {"type": "string"},
- },
- "token": {
- "type": "string",
- "description": "A contract token obtained from %s."
- % UA_URL,
- },
- },
- "required": ["token"],
- "additionalProperties": False,
- }
- },
-}
-
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
@@ -194,7 +168,6 @@ def handle(name, cfg, cloud, log, args):
name,
)
return
- validate_cloudconfig_schema(cfg, schema)
if "commands" in ua_section:
msg = (
'Deprecated configuration "ubuntu-advantage: commands" provided.'
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 44a3bdb4..15f621a7 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -7,17 +7,13 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, temp_utils, type_utils, util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-frequency = PER_INSTANCE
distros = ["ubuntu"]
+
meta: MetaSchema = {
"id": "cc_ubuntu_drivers",
"name": "Ubuntu Drivers",
@@ -37,47 +33,15 @@ meta: MetaSchema = {
"""
)
],
- "frequency": frequency,
+ "frequency": PER_INSTANCE,
}
-schema = {
- "type": "object",
- "properties": {
- "drivers": {
- "type": "object",
- "additionalProperties": False,
- "properties": {
- "nvidia": {
- "type": "object",
- "additionalProperties": False,
- "required": ["license-accepted"],
- "properties": {
- "license-accepted": {
- "type": "boolean",
- "description": (
- "Do you accept the NVIDIA driver license?"
- ),
- },
- "version": {
- "type": "string",
- "description": (
- "The version of the driver to install (e.g."
- ' "390", "410"). Defaults to the latest'
- " version."
- ),
- },
- },
- },
- },
- },
- },
-}
+__doc__ = get_meta_doc(meta)
+
OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
"ubuntu-drivers: error: argument <command>: invalid choice: 'install'"
)
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
-
# Use a debconf template to configure a global debconf variable
# (linux/nvidia/latelink) setting this to "true" allows the
@@ -180,5 +144,4 @@ def handle(name, cfg, cloud, log, _args):
log.debug("Skipping module named %s, no 'drivers' key in config", name)
return
- validate_cloudconfig_schema(cfg, schema)
install_drivers(cfg["drivers"], cloud.distro.install_packages)
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index f0aa9b0f..5334f453 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -6,18 +6,22 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Update Etc Hosts
-----------------
-**Summary:** update the hosts file (usually ``/etc/hosts``)
+"""Update Etc Hosts: Update the hosts file (usually ``/etc/hosts``)"""
+
+from textwrap import dedent
+
+from cloudinit import templater, util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
This module will update the contents of the local hosts database (hosts file;
usually ``/etc/hosts``) based on the hostname/fqdn specified in config.
Management of the hosts file is controlled using ``manage_etc_hosts``. If this
is set to false, cloud-init will not manage the hosts file at all. This is the
default behavior.
-If set to ``true`` or ``template``, cloud-init will generate the hosts file
+If set to ``true``, cloud-init will generate the hosts file
using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the
``/etc/cloud/templates/hosts.tmpl`` template, the strings ``$hostname`` and
``$fqdn`` will be replaced with the hostname and fqdn respectively.
@@ -36,24 +40,57 @@ ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
.. note::
for instructions on specifying hostname and fqdn, see documentation for
``cc_set_hostname``
-
-**Internal name:** ``cc_update_etc_hosts``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- manage_etc_hosts: <true/"template"/false/"localhost">
- fqdn: <fqdn>
- hostname: <fqdn/hostname>
"""
-from cloudinit import templater, util
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_update_etc_hosts",
+ "name": "Update Etc Hosts",
+ "title": "Update the hosts file (usually ``/etc/hosts``)",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Do not update or manage /etc/hosts at all. This is the default behavior.
+ #
+ # Whatever is present at instance boot time will be present after boot.
+ # User changes will not be overwritten.
+ manage_etc_hosts: false
+ """
+ ),
+ dedent(
+ """\
+ # Manage /etc/hosts with cloud-init.
+ # On every boot, /etc/hosts will be re-written from
+ # ``/etc/cloud/templates/hosts.tmpl``.
+ #
+ # The strings '$hostname' and '$fqdn' are replaced in the template
+ # with the appropriate values either from the cloud-config ``fqdn`` or
+ # ``hostname`` if provided. When absent, the cloud metadata will be
+ # checked for ``local-hostname``, which can be split into <hostname>.<fqdn>.
+ #
+ # To make modifications persistent across a reboot, you must modify
+ # ``/etc/cloud/templates/hosts.tmpl``.
+ manage_etc_hosts: true
+ """
+ ),
+ dedent(
+ """\
+ # Update /etc/hosts every boot providing a "localhost" 127.0.1.1 entry
+ # with the latest hostname and fqdn as provided by either IMDS or
+ # cloud-config.
+ # All other entries will be left as is.
+ # 'ping `hostname`' will ping 127.0.1.1
+ manage_etc_hosts: localhost
+ """
+ ),
+ ],
+ "frequency": PER_ALWAYS,
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
@@ -62,6 +99,11 @@ def handle(name, cfg, cloud, log, _args):
hosts_fn = cloud.distro.hosts_fn
if util.translate_bool(manage_hosts, addons=["template"]):
+ if manage_hosts == "template":
+ log.warning(
+ "DEPRECATED: please use manage_etc_hosts: true instead of"
+ " 'template'"
+ )
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
log.warning(
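
A minimal sketch of the ``$hostname``/``$fqdn`` substitution that the module
description above promises, using the standard library's string.Template as a
stand-in for cloud-init's own templater; the template text is hypothetical,
not the shipped hosts.tmpl:

    import string

    # Stand-in for /etc/cloud/templates/hosts.tmpl (hypothetical content).
    hosts_tmpl = "127.0.1.1 $fqdn $hostname\n127.0.0.1 localhost\n"

    rendered = string.Template(hosts_tmpl).safe_substitute(
        hostname="myhost", fqdn="myhost.example.com"
    )
    print(rendered)
    # 127.0.1.1 myhost.example.com myhost
    # 127.0.0.1 localhost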
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 09f6f6da..1042abf3 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -6,38 +6,76 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Update Hostname
----------------
-**Summary:** update hostname and fqdn
+"""Update Hostname: Update hostname and fqdn"""
+
+import os
+from textwrap import dedent
+
+from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_ALWAYS
+MODULE_DESCRIPTION = """\
This module will update the system hostname and fqdn. If ``preserve_hostname``
-is set, then the hostname will not be altered.
+is set to ``true``, then the hostname will not be altered.
.. note::
for instructions on specifying hostname and fqdn, see documentation for
``cc_set_hostname``
-
-**Internal name:** ``cc_update_hostname``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
- preserve_hostname: <true/false>
- prefer_fqdn_over_hostname: <true/false>
- fqdn: <fqdn>
- hostname: <fqdn/hostname>
"""
-import os
-
-from cloudinit import util
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
+distros = ["all"]
+
+meta: MetaSchema = {
+ "id": "cc_update_hostname",
+ "name": "Update Hostname",
+ "title": "Update hostname and fqdn",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # By default: when ``preserve_hostname`` is not specified, cloud-init
+ # updates ``/etc/hostname`` per-boot based on the cloud-provided
+ # ``local-hostname`` setting. If you manually change ``/etc/hostname``
+ # after boot, cloud-init will no longer modify it.
+ #
+ # This default cloud-init behavior is equivalent to this cloud-config:
+ preserve_hostname: false
+ """
+ ),
+ dedent(
+ """\
+ # Prevent cloud-init from updating the system hostname.
+ preserve_hostname: true
+ """
+ ),
+ dedent(
+ """\
+ # Prevent cloud-init from updating ``/etc/hostname``
+ preserve_hostname: true
+ """
+ ),
+ dedent(
+ """\
+ # Set hostname to "external.fqdn.me" instead of "myhost"
+ fqdn: external.fqdn.me
+ hostname: myhost
+ prefer_fqdn_over_hostname: true
+ """
+ ),
+ dedent(
+ """\
+ # Set hostname to "external" instead of "external.fqdn.me" when
+ # cloud metadata provides the ``local-hostname``: "external.fqdn.me".
+ prefer_fqdn_over_hostname: false
+ """
+ ),
+ ],
+ "frequency": PER_ALWAYS,
+}
+
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, cloud, log, _args):
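
A sketch of the hostname/fqdn split behavior described in the examples above;
``split_local_hostname`` is a hypothetical helper for illustration, not
cloud-init's actual util.get_hostname_fqdn:

    def split_local_hostname(local_hostname: str, prefer_fqdn: bool) -> str:
        # "external.fqdn.me" splits into hostname "external" and
        # fqdn "external.fqdn.me" (hypothetical helper).
        hostname, _, _domain = local_hostname.partition(".")
        return local_hostname if prefer_fqdn else hostname

    print(split_local_hostname("external.fqdn.me", prefer_fqdn=True))
    # external.fqdn.me
    print(split_local_hostname("external.fqdn.me", prefer_fqdn=False))
    # external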
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index ef77a799..96e63242 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -4,72 +4,48 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Users and Groups
-----------------
-**Summary:** configure users and groups
+"Users and Groups: Configure users and groups"
+
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+
+# Ensure this is aliased to a name not 'distros'
+# since the module attribute 'distros'
+# is a list of distros that are supported, not a sub-module
+from cloudinit.distros import ug_util
+from cloudinit.settings import PER_INSTANCE
+MODULE_DESCRIPTION = """\
This module configures users and groups. For more detailed information on user
-options, see the ``Including users and groups`` config example.
-
-Groups to add to the system can be specified as a list under the ``groups``
-key. Each entry in the list should either contain a the group name as a string,
-or a dictionary with the group name as the key and a list of users who should
-be members of the group as the value. **Note**: Groups are added before users,
-so any users in a group list must already exist on the system.
-
-The ``users`` config key takes a list of users to configure. The first entry in
-this list is used as the default user for the system. To preserve the standard
-default user for the distro, the string ``default`` may be used as the first
-entry of the ``users`` list. Each entry in the ``users`` list, other than a
-``default`` entry, should be a dictionary of options for the user. Supported
-config keys for an entry in ``users`` are as follows:
-
- - ``name``: The user's login name
- - ``expiredate``: Optional. Date on which the user's account will be
- disabled. Default: none
- - ``gecos``: Optional. Comment about the user, usually a comma-separated
- string of real name and contact information. Default: none
- - ``groups``: Optional. Additional groups to add the user to. Default: none
- - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>``
- - ``inactive``: Optional. Number of days after a password expires until
- the account is permanently disabled. Default: none
- - ``lock_passwd``: Optional. Disable password login. Default: true
- - ``no_create_home``: Optional. Do not create home directory. Default:
- false
- - ``no_log_init``: Optional. Do not initialize lastlog and faillog for
- user. Default: false
- - ``no_user_group``: Optional. Do not create group named after user.
- Default: false
- - ``passwd``: Hash of user password
- - ``primary_group``: Optional. Primary group for user. Default to new group
- named after user.
- - ``selinux_user``: Optional. SELinux user for user's login. Default to
- default SELinux user.
- - ``shell``: Optional. The user's login shell. The default is to set no
- shell, which results in a system-specific default being used.
- - ``snapuser``: Optional. Specify an email address to create the user as
- a Snappy user through ``snap create-user``. If an Ubuntu SSO account is
- associated with the address, username and SSH keys will be requested from
- there. Default: none
- - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's
- authkeys file. Default: none. This key can not be combined with
- ``ssh_redirect_user``.
- - ``ssh_import_id``: Optional. SSH id to import for user. Default: none.
- This key can not be combined with ``ssh_redirect_user``.
- - ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH
- logins for this user. When specified, all cloud meta-data public SSH
- keys will be set up in a disabled state for this username. Any SSH login
- as this username will timeout and prompt with a message to login instead
- as the configured <default_username> for this instance. Default: false.
- This key can not be combined with ``ssh_import_id`` or
- ``ssh_authorized_keys``.
- - ``sudo``: Optional. Sudo rule to use, list of sudo rules to use or False.
- Default: none. An absence of sudo key, or a value of none or false
- will result in no sudo rules being written for the user.
- - ``system``: Optional. Create user as system user with no home directory.
- Default: false
- - ``uid``: Optional. The user's ID. Default: The next available value.
+options, see the :ref:`Including users and groups<yaml_examples>` config
+example.
+
+Groups to add to the system can be specified under the ``groups`` key as
+a string of comma-separated groups to create, or a list. Each item in
+the list should be either a string naming a single group to create, or a
+dictionary with the group name as the key and, as the value, either a single
+user name or a list of users who should be members of the group.
+
+.. note::
+ Groups are added before users, so any users in a group list must
+ already exist on the system.
+
+Users to add can be specified as a string or list under the ``users`` key.
+Each entry in the list should either be a string or a dictionary. If a string
+is specified, that string can be comma-separated usernames to create or the
+reserved string ``default`` which represents the primary admin user used to
+access the system. The ``default`` user varies per distribution and is
+generally configured in ``/etc/cloud/cloud.cfg`` by the ``default_user`` key.
+
+Each ``users`` dictionary item must contain either a ``name`` or ``snapuser``
+key, otherwise it will be ignored. Omission of ``default`` as the first item
+in the ``users`` list skips creation of the default user. If no ``users``
+key is provided, the default behavior is to create the default user via this
+config::
+
+ users:
+ - default
.. note::
Specifying a hash of a user's password with ``passwd`` is a security risk
@@ -85,68 +61,120 @@ config keys for an entry in ``users`` are as follows:
to already-existing users: ``plain_text_passwd``, ``hashed_passwd``,
``lock_passwd``, ``sudo``, ``ssh_authorized_keys``, ``ssh_redirect_user``.
-**Internal name:** ``cc_users_groups``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- groups:
- - <group>: [<user>, <user>]
- - <group>
+The ``user`` key can be used to override the ``default_user`` configuration
+defined in ``/etc/cloud/cloud.cfg``. The ``user`` value should be a dictionary
+which supports the same config keys as the ``users`` dictionary items.
+"""
- users:
+meta: MetaSchema = {
+ "id": "cc_users_groups",
+ "name": "Users and Groups",
+ "title": "Configure users and groups",
+ "description": MODULE_DESCRIPTION,
+ "distros": ["all"],
+ "examples": [
+ dedent(
+ """\
+ # Add the ``default_user`` from /etc/cloud/cloud.cfg.
+ # This is also the default behavior of cloud-init when no `users` key
+ # is provided.
+ users:
+ - default
+ """
+ ),
+ dedent(
+ """\
+ # Add the 'admingroup' with members 'root' and 'sys' and an empty
+ # group cloud-users.
+ groups:
+ - admingroup: [root,sys]
+ - cloud-users
+ """
+ ),
+ dedent(
+ """\
+ # Skip creation of the <default> user and only create newsuper.
+ # Password-based login is rejected, but the GitHub user TheRealFalcon
+ # and the Launchpad user falcojr can SSH as newsuper. The default
+ # shell for newsuper is bash instead of the system default.
+ users:
+ - name: newsuper
+ gecos: Big Stuff
+ groups: users, admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ lock_passwd: true
+ ssh_import_id:
+ - lp:falcojr
+ - gh:TheRealFalcon
+ """
+ ),
+ dedent(
+ """\
+ # On a system with SELinux enabled, add youruser and set the
+ # SELinux user to 'staff_u'. When omitted on SELinux, the system will
+ # select the configured default SELinux user.
+ users:
+ - default
+ - name: youruser
+ selinux_user: staff_u
+ """
+ ),
+ dedent(
+ """\
+ # To redirect a legacy username to the <default> user for a
+ # distribution, ssh_redirect_user will accept an SSH connection and
+ # emit a message telling the client to ssh as the <default> user.
+ # SSH clients will get the message:
+ users:
- default
- # User explicitly omitted from sudo permission; also default behavior.
- - name: <some_restricted_user>
+ - name: nosshlogins
+ ssh_redirect_user: true
+ """
+ ),
+ dedent(
+ """\
+ # Override any ``default_user`` config in /etc/cloud/cloud.cfg with
+ # supplemental config options.
+ # This config will make the default user mynewdefault and remove
+ # the user's sudo rights.
+ ssh_import_id: [chad.smith]
+ user:
+ name: mynewdefault
sudo: false
- - name: <username>
- expiredate: '<date>'
- gecos: <comment>
- groups: <additional groups>
- homedir: <home directory>
- inactive: '<number of days>'
- lock_passwd: <true/false>
- no_create_home: <true/false>
- no_log_init: <true/false>
- no_user_group: <true/false>
- passwd: <password>
- primary_group: <primary group>
- selinux_user: <selinux username>
- shell: <shell path>
- snapuser: <email>
- ssh_redirect_user: <true/false>
- ssh_authorized_keys:
- - <key>
- - <key>
- ssh_import_id: <id>
- sudo: <sudo config>
- system: <true/false>
- uid: <user id>
-"""
-
-from cloudinit import log as logging
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit.distros import ug_util
-from cloudinit.settings import PER_INSTANCE
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
-frequency = PER_INSTANCE
+# NO_HOME and NEED_HOME are mutually exclusive options
+NO_HOME = ("no_create_home", "system")
+NEED_HOME = ("ssh_authorized_keys", "ssh_import_id", "ssh_redirect_user")
def handle(name, cfg, cloud, _log, _args):
(users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(default_user, _user_config) = ug_util.extract_default(users)
cloud_keys = cloud.get_public_ssh_keys() or []
+
for (name, members) in groups.items():
cloud.distro.create_group(name, members)
+
for (user, config) in users.items():
+
+ no_home = [key for key in NO_HOME if config.get(key)]
+ need_home = [key for key in NEED_HOME if config.get(key)]
+ if no_home and need_home:
+ raise ValueError(
+ f"Not creating user {user}. Key(s) {', '.join(need_home)}"
+ f" cannot be provided with {', '.join(no_home)}"
+ )
+
ssh_redirect_user = config.pop("ssh_redirect_user", False)
if ssh_redirect_user:
if "ssh_authorized_keys" in config or "ssh_import_id" in config:
@@ -173,6 +201,7 @@ def handle(name, cfg, cloud, _log, _args):
else:
config["ssh_redirect_user"] = default_user
config["cloud_public_ssh_keys"] = cloud_keys
+
cloud.distro.create_user(user, **config)
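
The NO_HOME/NEED_HOME guard above can be exercised on its own; a runnable
sketch with hypothetical user configs:

    NO_HOME = ("no_create_home", "system")
    NEED_HOME = ("ssh_authorized_keys", "ssh_import_id", "ssh_redirect_user")

    def check_home_conflict(user: str, config: dict) -> None:
        # Mirrors the guard in handle(): options that suppress home-directory
        # creation cannot be combined with options that require one.
        no_home = [key for key in NO_HOME if config.get(key)]
        need_home = [key for key in NEED_HOME if config.get(key)]
        if no_home and need_home:
            raise ValueError(
                f"Not creating user {user}. Key(s) {', '.join(need_home)}"
                f" cannot be provided with {', '.join(no_home)}"
            )

    check_home_conflict("svc", {"system": True})  # fine: no SSH keys wanted
    try:
        check_home_conflict("bad", {"system": True, "ssh_import_id": ["lp:x"]})
    except ValueError as exc:
        print(exc)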
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 37dae392..7cc7f854 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -12,41 +12,16 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import util
-from cloudinit.config.schema import (
- MetaSchema,
- get_meta_doc,
- validate_cloudconfig_schema,
-)
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-frequency = PER_INSTANCE
-
DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
DEFAULT_DEFER = False
-UNKNOWN_ENC = "text/plain"
+TEXT_PLAIN_ENC = "text/plain"
LOG = logging.getLogger(__name__)
-distros = ["all"]
-
-# The schema definition for each cloud-config module is a strict contract for
-# describing supported configuration parameters for each cloud-config section.
-# It allows cloud-config to validate and alert users to invalid or ignored
-# configuration options before actually attempting to deploy with said
-# configuration.
-
-supported_encoding_types = [
- "gz",
- "gzip",
- "gz+base64",
- "gzip+base64",
- "gz+b64",
- "gzip+b64",
- "b64",
- "base64",
-]
-
meta: MetaSchema = {
"id": "cc_write_files",
"name": "Write Files",
@@ -70,7 +45,7 @@ meta: MetaSchema = {
the early boot process. Use /run/somedir instead to avoid race
LP:1707222."""
),
- "distros": distros,
+ "distros": ["all"],
"examples": [
dedent(
"""\
@@ -132,113 +107,13 @@ meta: MetaSchema = {
"""
),
],
- "frequency": frequency,
-}
-
-schema = {
- "type": "object",
- "properties": {
- "write_files": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "path": {
- "type": "string",
- "description": dedent(
- """\
- Path of the file to which ``content`` is decoded
- and written
- """
- ),
- },
- "content": {
- "type": "string",
- "default": "",
- "description": dedent(
- """\
- Optional content to write to the provided ``path``.
- When content is present and encoding is not '%s',
- decode the content prior to writing. Default:
- **''**
- """
- % UNKNOWN_ENC
- ),
- },
- "owner": {
- "type": "string",
- "default": DEFAULT_OWNER,
- "description": dedent(
- """\
- Optional owner:group to chown on the file. Default:
- **{owner}**
- """.format(
- owner=DEFAULT_OWNER
- )
- ),
- },
- "permissions": {
- "type": "string",
- "default": oct(DEFAULT_PERMS).replace("o", ""),
- "description": dedent(
- """\
- Optional file permissions to set on ``path``
- represented as an octal string '0###'. Default:
- **'{perms}'**
- """.format(
- perms=oct(DEFAULT_PERMS).replace("o", "")
- )
- ),
- },
- "encoding": {
- "type": "string",
- "default": UNKNOWN_ENC,
- "enum": supported_encoding_types,
- "description": dedent(
- """\
- Optional encoding type of the content. Default is
- **text/plain** and no content decoding is
- performed. Supported encoding types are:
- %s."""
- % ", ".join(supported_encoding_types)
- ),
- },
- "append": {
- "type": "boolean",
- "default": False,
- "description": dedent(
- """\
- Whether to append ``content`` to existing file if
- ``path`` exists. Default: **false**.
- """
- ),
- },
- "defer": {
- "type": "boolean",
- "default": DEFAULT_DEFER,
- "description": dedent(
- """\
- Defer writing the file until 'final' stage, after
- users were created, and packages were installed.
- Default: **{defer}**.
- """.format(
- defer=DEFAULT_DEFER
- )
- ),
- },
- },
- "required": ["path"],
- "additionalProperties": False,
- },
- }
- },
+ "frequency": PER_INSTANCE,
}
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
def handle(name, cfg, _cloud, log, _args):
- validate_cloudconfig_schema(cfg, schema)
file_list = cfg.get("write_files", [])
filtered_files = [
f
@@ -266,14 +141,18 @@ def canonicalize_extraction(encoding_type):
# Yaml already encodes binary data as base64 if it is given to the
# yaml file as binary, so those will be automatically decoded for you.
# But the above b64 is just for people that are more 'comfortable'
- # specifing it manually (which might be a possiblity)
+ # specifying it manually (which might be a possibility)
if encoding_type in ["b64", "base64"]:
return ["application/base64"]
+ if encoding_type == TEXT_PLAIN_ENC:
+ return [TEXT_PLAIN_ENC]
if encoding_type:
LOG.warning(
- "Unknown encoding type %s, assuming %s", encoding_type, UNKNOWN_ENC
+ "Unknown encoding type %s, assuming %s",
+ encoding_type,
+ TEXT_PLAIN_ENC,
)
- return [UNKNOWN_ENC]
+ return [TEXT_PLAIN_ENC]
def write_files(name, files):
@@ -327,7 +206,7 @@ def extract_contents(contents, extraction_types):
result = util.decomp_gzip(result, quiet=False, decode=False)
elif t == "application/base64":
result = base64.b64decode(result)
- elif t == UNKNOWN_ENC:
+ elif t == TEXT_PLAIN_ENC:
pass
return result
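
The gz/base64 handling above composes in a fixed order; a standalone sketch of
the ``gz+b64`` round trip (base64-decode first, then gunzip), with a made-up
payload:

    import base64
    import gzip

    original = b"hello write_files\n"
    # What a user would place under write_files with encoding: gz+b64.
    payload = base64.b64encode(gzip.compress(original)).decode("ascii")

    # Decode in the same order the module applies extraction types:
    # application/base64 first, then gzip decompression.
    result = gzip.decompress(base64.b64decode(payload))
    assert result == original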
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
index 1294628c..dbbe90f6 100644
--- a/cloudinit/config/cc_write_files_deferred.py
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -2,41 +2,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""Defer writing certain files"""
+"""Write Files Deferred: Defer writing certain files"""
from cloudinit import util
-from cloudinit.config.cc_write_files import DEFAULT_DEFER
-from cloudinit.config.cc_write_files import schema as write_files_schema
-from cloudinit.config.cc_write_files import write_files
-from cloudinit.config.schema import validate_cloudconfig_schema
-
-# meta is not used in this module, but it remains as code documentation
-#
-# id: cc_write_files_deferred'
-# name: 'Write Deferred Files
-# distros: ['all'],
-# frequency: PER_INSTANCE,
-# title:
-# write certain files, whose creation as been deferred, during
-# final stage
-# description:
-# This module is based on `'Write Files' <write-files>`__, and
-# will handle all files from the write_files list, that have been
-# marked as deferred and thus are not being processed by the
-# write-files module.
-#
-# *Please note that his module is not exposed to the user through
-# its own dedicated top-level directive.*
-
-schema = write_files_schema
-
-
-# Not exposed, because related modules should document this behaviour
-__doc__ = None
+from cloudinit.config.cc_write_files import DEFAULT_DEFER, write_files
+from cloudinit.config.schema import MetaSchema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+MODULE_DESCRIPTION = """\
+This module is based on `'Write Files' <write-files>`__, and
+will handle all files from the write_files list that have been
+marked as deferred and thus are not being processed by the
+write-files module.
+
+*Please note that this module is not exposed to the user through
+its own dedicated top-level directive.*
+"""
+meta: MetaSchema = {
+ "id": "cc_write_files_deferred",
+ "name": "Write Files Deferred",
+ "title": "Defer writing certain files",
+ "description": __doc__,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [],
+}
+
+# This module is undocumented in our schema docs
+__doc__ = ""
def handle(name, cfg, _cloud, log, _args):
- validate_cloudconfig_schema(cfg, schema)
file_list = cfg.get("write_files", [])
filtered_files = [
f
@@ -51,6 +48,3 @@ def handle(name, cfg, _cloud, log, _args):
)
return
write_files(name, filtered_files)
-
-
-# vi: ts=4 expandtab
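
A sketch of the defer filtering that this module and cc_write_files split
between them; the file entries are hypothetical:

    DEFAULT_DEFER = False

    file_list = [
        {"path": "/run/early.txt", "content": "written in config stage"},
        {"path": "/etc/late.txt", "content": "written in final stage",
         "defer": True},
    ]

    # cc_write_files handles entries without defer; this module keeps
    # only the ones flagged defer: true.
    deferred = [f for f in file_list if f.get("defer", DEFAULT_DEFER)]
    print([f["path"] for f in deferred])  # ['/etc/late.txt']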
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 7a232689..f7357192 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -4,38 +4,23 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Yum Add Repo
-------------
-**Summary:** add yum repository configuration to the system
-
-Add yum repository configuration to ``/etc/yum.repos.d``. Configuration files
-are named based on the dictionary key under the ``yum_repos`` they are
-specified with. If a config file already exists with the same name as a config
-entry, the config entry will be skipped.
-
-**Internal name:** ``cc_yum_add_repo``
-
-**Module frequency:** always
-
-**Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora,
- miraclelinux, openEuler, photon, rhel, rocky, virtuozzo
-
-**Config keys**::
-
- yum_repos:
- <repo-name>:
- baseurl: <repo url>
- name: <repo name>
- enabled: <true/false>
- # any repository configuration options (see man yum.conf)
-"""
+"Yum Add Repo: Add yum repository configuration to the system"
import io
import os
from configparser import ConfigParser
+from textwrap import dedent
from cloudinit import util
+from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.settings import PER_INSTANCE
+
+MODULE_DESCRIPTION = """\
+Add yum repository configuration to ``/etc/yum.repos.d``. Configuration files
+are named based on the opaque dictionary key under ``yum_repos`` with which
+they are specified. If a config file already exists with the same name as a
+config entry, the config entry will be skipped.
+"""
distros = [
"almalinux",
@@ -50,6 +35,87 @@ distros = [
"virtuozzo",
]
+COPR_BASEURL = (
+ "https://download.copr.fedorainfracloud.org/results/@cloud-init/"
+ "cloud-init-dev/epel-8-$basearch/"
+)
+COPR_GPG_URL = (
+ "https://download.copr.fedorainfracloud.org/results/@cloud-init/"
+ "cloud-init-dev/pubkey.gpg"
+)
+EPEL_TESTING_BASEURL = (
+ "https://download.copr.fedorainfracloud.org/results/@cloud-init/"
+ "cloud-init-dev/pubkey.gpg"
+)
+
+meta: MetaSchema = {
+ "id": "cc_yum_add_repo",
+ "name": "Yum Add Repo",
+ "title": "Add yum repository configuration to the system",
+ "description": MODULE_DESCRIPTION,
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ yum_repos:
+ my_repo:
+ baseurl: http://blah.org/pub/epel/testing/5/$basearch/
+ yum_repo_dir: /store/custom/yum.repos.d
+ """
+ ),
+ dedent(
+ f"""\
+ # Enable cloud-init upstream's daily testing repo for EPEL 8 to
+ # install latest cloud-init from tip of `main` for testing.
+ yum_repos:
+ cloud-init-daily:
+ name: Copr repo for cloud-init-dev owned by @cloud-init
+ baseurl: {COPR_BASEURL}
+ type: rpm-md
+ skip_if_unavailable: true
+ gpgcheck: true
+ gpgkey: {COPR_GPG_URL}
+ enabled_metadata: 1
+ """
+ ),
+ dedent(
+ f"""\
+ # Add the file /etc/yum.repos.d/epel_testing.repo, which can
+ # subsequently be used by yum for later operations.
+ yum_repos:
+ # The name of the repository
+ epel-testing:
+ baseurl: {EPEL_TESTING_BASEURL}
+ enabled: false
+ failovermethod: priority
+ gpgcheck: true
+ gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
+ name: Extra Packages for Enterprise Linux 5 - Testing
+ """
+ ),
+ dedent(
+ """\
+ # Any yum repo configuration can be passed directly into
+ # the repository file created. See: man yum.conf for supported
+ # config keys.
+ #
+ # Write /etc/yum.repos.d/my_package_stream.repo with gpgkey checks
+ # enabled on the repo data of the repository.
+ yum_repos:
+ my package stream:
+ baseurl: http://blah.org/pub/epel/testing/5/$basearch/
+ mirrorlist: http://some-url-to-list-of-baseurls
+ repo_gpgcheck: 1
+ enable_gpgcheck: true
+ gpgkey: https://url.to.ascii-armored-gpg-key
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
+}
+
+__doc__ = get_meta_doc(meta)
+
def _canonicalize_id(repo_id):
repo_id = repo_id.lower().replace("-", "_")
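
The hunk above is cut off after the first normalization step; a plausible
sketch of the full mapping, consistent with the my_package_stream.repo example
in the docstring (the space-to-underscore step is an assumption, not shown in
this hunk):

    def _canonicalize_id(repo_id: str) -> str:
        # Lowercase and replace dashes; the space replacement below is
        # assumed from the my_package_stream.repo example above.
        repo_id = repo_id.lower().replace("-", "_")
        return repo_id.replace(" ", "_")

    print(_canonicalize_id("my package stream"))  # my_package_stream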
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index be444cce..9b682bc6 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -3,7 +3,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""zypper_add_repo: Add zyper repositories to the system"""
+"""zypper_add_repo: Add zypper repositories to the system"""
import os
from textwrap import dedent
@@ -16,22 +16,25 @@ from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
distros = ["opensuse", "sles"]
-
+MODULE_DESCRIPTION = """\
+Zypper behavior can be configured using the ``config`` key, which will modify
+``/etc/zypp/zypp.conf``. The configuration writer will only append the
+provided configuration options to the configuration file. Any duplicate
+options will be resolved by the way the zypp.conf INI file is parsed.
+
+.. note::
+ Setting ``configdir`` is not supported and will be skipped.
+
+The ``repos`` key may be used to add repositories to the system. Beyond the
+required ``id`` and ``baseurl`` attributes, no validation is performed
+on the ``repos`` entries. It is assumed the user is familiar with the
+zypper repository file format.
+"""
meta: MetaSchema = {
"id": "cc_zypper_add_repo",
- "name": "ZypperAddRepo",
+ "name": "Zypper Add Repo",
"title": "Configure zypper behavior and add zypper repositories",
- "description": dedent(
- """\
- Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
- configuration writer is "dumb" and will simply append the provided
- configuration options to the configuration file. Option settings
- that may be duplicate will be resolved by the way the zypp.conf file
- is parsed. The file is in INI format.
- Add repositories to the system. No validation is performed on the
- repository file entries, it is assumed the user is familiar with
- the zypper repository file format."""
- ),
+ "description": MODULE_DESCRIPTION,
"distros": distros,
"examples": [
dedent(
@@ -60,53 +63,7 @@ meta: MetaSchema = {
"frequency": PER_ALWAYS,
}
-schema = {
- "type": "object",
- "properties": {
- "zypper": {
- "type": "object",
- "properties": {
- "repos": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "id": {
- "type": "string",
- "description": dedent(
- """\
- The unique id of the repo, used when
- writing
- /etc/zypp/repos.d/<id>.repo."""
- ),
- },
- "baseurl": {
- "type": "string",
- "format": "uri", # built-in format type
- "description": "The base repositoy URL",
- },
- },
- "required": ["id", "baseurl"],
- "additionalProperties": True,
- },
- "minItems": 1,
- },
- "config": {
- "type": "object",
- "description": dedent(
- """\
- Any supported zypo.conf key is written to
- /etc/zypp/zypp.conf'"""
- ),
- },
- },
- "minProperties": 1, # Either config or repo must be provided
- "additionalProperties": False, # only repos and config allowed
- }
- },
-}
-
-__doc__ = get_meta_doc(meta, schema) # Supplement python help()
+__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
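
A minimal sketch of the append-only config writing that MODULE_DESCRIPTION
promises, assuming a hypothetical ``config`` mapping; cloud-init's real writer
appends to the existing /etc/zypp/zypp.conf rather than printing:

    config = {
        "download.use_deltarpm": "true",
        "configdir": "/ignored",  # documented above as unsupported
    }

    # Render only the supported options as INI-style key=value lines
    # to be appended to /etc/zypp/zypp.conf.
    lines = [
        f"{key}={value}"
        for key, value in config.items()
        if key != "configdir"
    ]
    print("\n".join(lines))  # download.use_deltarpm=true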
diff --git a/cloudinit/config/cloud-init-schema.json b/cloudinit/config/cloud-init-schema.json
deleted file mode 100644
index 2d43d06a..00000000
--- a/cloudinit/config/cloud-init-schema.json
+++ /dev/null
@@ -1,560 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-04/schema#",
- "$defs": {
- "apt_configure.mirror": {
- "type": "array",
- "items": {
- "type": "object",
- "additionalProperties": false,
- "required": ["arches"],
- "properties": {
- "arches": {
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1
- },
- "uri": {"type": "string", "format": "uri"},
- "search": {
- "type": "array",
- "items": {"type": "string", "format": "uri"},
- "minItems": 1
- },
- "search_dns": {
- "type": "boolean"
- },
- "keyid": {"type": "string"},
- "key": {"type": "string"},
- "keyserver": {"type": "string"}
- }
- },
- "minItems": 1
- },
- "ca_certs.properties": {
- "type": "object",
- "properties": {
- "remove-defaults": {
- "description": "Deprecated key name. Use remove_defaults instead.",
- "type": "boolean",
- "default": false
- },
- "remove_defaults": {
- "description": "Remove default CA certificates if true. Default: false",
- "type": "boolean",
- "default": false
- },
- "trusted": {
- "description": "List of trusted CA certificates to add.",
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1
- }
- },
- "additionalProperties": false,
- "minProperties": 1
- },
- "cc_apk_configure": {
- "type": "object",
- "properties": {
- "apk_repos": {
- "type": "object",
- "properties": {
- "preserve_repositories": {
- "type": "boolean",
- "default": false,
- "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``."
- },
- "alpine_repo": {
- "type": ["object", "null"],
- "properties": {
- "base_url": {
- "type": "string",
- "default": "https://alpine.global.ssl.fastly.net/alpine",
- "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``"
- },
- "community_enabled": {
- "type": "boolean",
- "default": false,
- "description": "Whether to add the Community repo to the repositories file. By default the Community repo is not included."
- },
- "testing_enabled": {
- "type": "boolean",
- "default": false,
- "description": "Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended to use the Testing repo on a machine running the ``Edge`` version of Alpine as packages installed from Testing may have dependencies that conflict with those in non-Edge Main or Community repos."
- },
- "version": {
- "type": "string",
- "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)"
- }
- },
- "required": ["version"],
- "minProperties": 1,
- "additionalProperties": false
- },
- "local_repo_base_url": {
- "type": "string",
- "description": "The base URL of an Alpine repository containing unofficial packages"
- }
- },
- "minProperties": 1,
- "additionalProperties": false
- }
- }
- },
- "cc_apt_configure": {
- "properties": {
- "apt": {
- "type": "object",
- "additionalProperties": false,
- "minProperties": 1,
- "properties": {
- "preserve_sources_list": {
- "type": "boolean",
- "default": false,
- "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set ``preserve_sources_list`` to ``true``.\n\nThe ``preserve_sources_list`` option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``."
- },
- "disable_suites": {
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1,
- "uniqueItems": true,
- "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for`` disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out."
- },
- "primary": {
- "$ref": "#/$defs/apt_configure.mirror",
- "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``"
- },
- "security": {
- "$ref": "#/$defs/apt_configure.mirror",
- "description": "Please refer to the primary config documentation"
- },
- "add_apt_repo_match": {
- "type": "string",
- "default": "^[\\w-]+:\\w",
- "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``"
- },
- "debconf_selections": {
- "type": "object",
- "minProperties": 1,
- "patternProperties": {
- "^.+$": {
- "type": "string"
- }
- },
- "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n\n - ``pkgname`` is the name of the package.\n - ``question`` the name of the questions.\n - ``type`` is the type of question.\n - ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``"
- },
- "sources_list": {
- "type": "string",
- "description": "Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within this template, the following strings will be replaced with the appropriate values:\n\n - ``$MIRROR``\n - ``$RELEASE``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$KEY_FILE``"
- },
- "conf": {
- "type": "string",
- "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline APT configuration, make sure to follow yaml syntax."
- },
- "https_proxy": {
- "type": "string",
- "description": "More convenient way to specify https APT proxy. https proxy url is specified in the format ``https://[[user][:pass]@]host[:port]/``."
- },
- "http_proxy": {
- "type": "string",
- "description": "More convenient way to specify http APT proxy. http proxy url is specified in the format ``http://[[user][:pass]@]host[:port]/``."
- },
- "proxy": {
- "type": "string",
- "description": "Alias for defining a http APT proxy."
- },
- "ftp_proxy": {
- "type": "string",
- "description": "More convenient way to specify ftp APT proxy. ftp proxy url is specified in the format ``ftp://[[user][:pass]@]host[:port]/``."
- },
- "sources": {
- "type": "object",
- "patternProperties": {
- "^.+$": {
- "type": "object",
- "properties": {
- "source": {
- "type": "string"
- },
- "keyid": {
- "type": "string"
- },
- "key": {
- "type": "string"
- },
- "keyserver": {
- "type": "string"
- },
- "filename": {
- "type": "string"
- }
- },
- "additionalProperties": false,
- "minProperties": 1
- }
- },
- "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``"
- }
- }
- }
- }
- },
- "cc_apt_pipelining": {
- "type": "object",
- "properties": {
- "apt_pipelining": {
- "oneOf": [
- {"type": "integer"},
- {"type": "boolean"},
- {"type": "string", "enum": ["none", "unchanged", "os"]}
- ]
- }
- }
- },
- "cc_bootcmd": {
- "type": "object",
- "properties": {
- "bootcmd": {
- "type": "array",
- "items": {
- "oneOf": [
- {"type": "array", "items": {"type": "string"}},
- {"type": "string"}
- ]
- },
- "additionalItems": false,
- "minItems": 1
- }
- }
- },
- "cc_byobu": {
- "type": "object",
- "properties": {
- "byobu_by_default": {
- "type": "string",
- "enum": [
- "enable-system",
- "enable-user",
- "disable-system",
- "disable-user",
- "enable",
- "disable",
- "user",
- "system"
- ]
- }
- }
- },
- "cc_ca_certs": {
- "type": "object",
- "properties": {
- "ca_certs": {
- "$ref": "#/$defs/ca_certs.properties"
- },
- "ca-certs": {
- "$ref": "#/$defs/ca_certs.properties"
- }
- }
- },
- "cc_chef": {
- "type": "object",
- "properties": {
- "chef": {
- "type": "object",
- "additionalProperties": false,
- "minProperties": 1,
- "properties": {
- "directories": {
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1,
- "uniqueItems": true,
- "description": "Create the necessary directories for chef to run. By default, it creates the following directories:\n\n - ``/etc/chef``\n - ``/var/log/chef``\n - ``/var/lib/chef``\n - ``/var/cache/chef``\n - ``/var/backups/chef``\n - ``/var/run/chef``"
- },
- "validation_cert": {
- "type": "string",
- "description": "Optional string to be written to file validation_key. Special value ``system`` means set use existing file."
- },
- "validation_key": {
- "type": "string",
- "default": "/etc/chef/validation.pem",
- "description": "Optional path for validation_cert. default to ``/etc/chef/validation.pem``"
- },
- "firstboot_path": {
- "type": "string",
- "default": "/etc/chef/firstboot.json",
- "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults to ``/etc/chef/firstboot.json``"
- },
- "exec": {
- "type": "boolean",
- "default": false,
- "description": "Set true if we should run or not run chef (defaults to false, unless a gem installed is requested where this will then default to true)."
- },
- "client_key": {
- "type": "string",
- "default": "/etc/chef/client.pem",
- "description": "Optional path for client_cert. Default to ``/etc/chef/client.pem``."
- },
- "encrypted_data_bag_secret": {
- "type": "string",
- "default": null,
- "description": "Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to null, meaning that chef will have to look at the path ``/etc/chef/encrypted_data_bag_secret`` for it."
- },
- "environment": {
- "type": "string",
- "default": "_default",
- "description": "Specifies which environment chef will use. By default, it will use the ``_default`` configuration."
- },
- "file_backup_path": {
- "type": "string",
- "default": "/var/backups/chef",
- "description": "Specifies the location in which backup files are stored. By default, it uses the ``/var/backups/chef`` location."
- },
- "file_cache_path": {
- "type": "string",
- "default": "/var/cache/chef",
- "description": "Specifies the location in which chef cache files will be saved. By default, it uses the ``/var/cache/chef`` location."
- },
- "json_attribs": {
- "type": "string",
- "default": "/etc/chef/firstboot.json",
- "description": "Specifies the location in which some chef json data is stored. By default, it uses the ``/etc/chef/firstboot.json`` location."
- },
- "log_level": {
- "type": "string",
- "default": ":info",
- "description": "Defines the level of logging to be stored in the log file. By default this value is set to ``:info``."
- },
- "log_location": {
- "type": "string",
- "default": "/var/log/chef/client.log",
- "description": "Specifies the location of the chef lof file. By default, the location is specified at ``/var/log/chef/client.log``."
- },
- "node_name": {
- "type": "string",
- "description": "The name of the node to run. By default, we will use th instance id as the node name."
- },
- "omnibus_url": {
- "type": "string",
- "default": "https://www.chef.io/chef/install.sh",
- "description": "Omnibus URL if chef should be installed through Omnibus. By default, it uses the ``https://www.chef.io/chef/install.sh``."
- },
- "omnibus_url_retries": {
- "type": "integer",
- "default": 5,
- "description": "The number of retries that will be attempted to reach the Omnibus URL. Default is 5."
- },
- "omnibus_version": {
- "type": "string",
- "description": "Optional version string to require for omnibus install."
- },
- "pid_file": {
- "type": "string",
- "default": "/var/run/chef/client.pid",
- "description": "The location in which a process identification number (pid) is saved. By default, it saves in the ``/var/run/chef/client.pid`` location."
- },
- "server_url": {
- "type": "string",
- "description": "The URL for the chef server"
- },
- "show_time": {
- "type": "boolean",
- "default": true,
- "description": "Show time in chef logs"
- },
- "ssl_verify_mode": {
- "type": "string",
- "default": ":verify_none",
- "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n\n - ``:verify_none``: No validation of SSL certificates.\n - ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``."
- },
- "validation_name": {
- "type": "string",
- "description": "The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during the initial Chef Infra Client run."
- },
- "force_install": {
- "type": "boolean",
- "default": false,
- "description": "If set to ``true``, forces chef installation, even if it is already installed."
- },
- "initial_attributes": {
- "type": "object",
- "items": {"type": "string"},
- "description": "Specify a list of initial attributes used by the cookbooks."
- },
- "install_type": {
- "type": "string",
- "default": "packages",
- "enum": [
- "packages",
- "gems",
- "omnibus"
- ],
- "description": "The type of installation for chef. It can be one of the following values:\n\n - ``packages``\n - ``gems``\n - ``omnibus``"
- },
- "run_list": {
- "type": "array",
- "items": {"type": "string"},
- "description": "A run list for a first boot json."
- },
- "chef_license": {
- "type": "string",
- "description": "string that indicates if user accepts or not license related to some of chef products"
- }
- }
- }
- }
- },
- "cc_debug": {
- "type": "object",
- "properties": {
- "debug": {
- "additionalProperties": false,
- "minProperties": 1,
- "type": "object",
- "properties": {
- "verbose": {
- "description": "Should always be true for this module",
- "type": "boolean"
- },
- "output": {
- "description": "Location to write output. Defaults to console + log",
- "type": "string"
- }
- }
- }
- }
- },
- "cc_disable_ec2_metadata": {
- "type": "object",
- "properties": {
- "disable_ec2_metadata": {
- "default": false,
- "description": "Set true to disable IPv4 routes to EC2 metadata. Default: false.",
- "type": "boolean"
- }
- }
- },
- "cc_disk_setup": {
- "type": "object",
- "properties": {
- "device_aliases": {
- "type": "object",
- "patternProperties": {
- "^.+$": {
- "label": "<alias_name>",
- "type": "string",
- "description": "Path to disk to be aliased by this name."
- }
- }
- },
- "disk_setup": {
- "type": "object",
- "patternProperties": {
- "^.+$": {
- "label": "<alias name/path>",
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "table_type": {
- "type": "string",
- "default": "mbr",
- "enum": ["mbr", "gpt"],
- "description": "Specifies the partition table type, either ``mbr`` or ``gpt``. Default: ``mbr``."
- },
- "layout": {
- "type": ["string", "boolean", "array"],
- "default": false,
- "oneOf": [
- {"type": "string", "enum": ["remove"]},
- {"type": "boolean"},
- {
- "type": "array",
- "items": {
- "oneOf": [
- {"type": "integer"},
- {
- "type": "array",
- "items": {"type": "integer"},
- "minItems": 2,
- "maxItems": 2
- }
- ]
- }
- }
- ],
- "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). Default: ``false``."
- },
- "overwrite": {
- "type": "boolean",
- "default": false,
- "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``"
- }
- }
- }
- }
- },
- "fs_setup": {
- "type": "array",
- "items": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "label": {
- "type": "string",
- "description": "Label for the filesystem."
- },
- "filesystem": {
- "type": "string",
- "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``"
- },
- "device": {
- "type": "string",
- "description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<device name>.<partition number>`` format, the value of ``partition`` will be overwritten."
- },
- "partition": {
- "type": ["string", "integer"],
- "oneOf": [
- {
- "type": "string",
- "enum": ["auto", "any", "none"]
- },
- {"type": "integer"}
- ],
- "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
- },
- "overwrite": {
- "type": "boolean",
- "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``"
- },
- "replace_fs": {
- "type": "string",
- "description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``."
- },
- "extra_opts": {
- "type": ["array", "string"],
- "items": {"type": "string"},
- "description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly."
- },
- "cmd": {
- "type": ["array", "string"],
- "items": {"type": "string"},
- "description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. This is only necessary if you need to override the default command."
- }
- }
- }
- }
- }
- }
- },
- "allOf": [
- { "$ref": "#/$defs/cc_apk_configure" },
- { "$ref": "#/$defs/cc_apt_configure" },
- { "$ref": "#/$defs/cc_apt_pipelining" },
- { "$ref": "#/$defs/cc_bootcmd" },
- { "$ref": "#/$defs/cc_byobu" },
- { "$ref": "#/$defs/cc_ca_certs" },
- { "$ref": "#/$defs/cc_chef" },
- { "$ref": "#/$defs/cc_debug" },
- { "$ref": "#/$defs/cc_disable_ec2_metadata" },
- { "$ref": "#/$defs/cc_disk_setup" }
- ]
-}
diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py
new file mode 100644
index 00000000..efb7a5a4
--- /dev/null
+++ b/cloudinit/config/modules.py
@@ -0,0 +1,302 @@
+# Copyright (C) 2008-2022 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Chuck Short <chuck.short@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+from collections import namedtuple
+from typing import List
+
+from cloudinit import config, importer
+from cloudinit import log as logging
+from cloudinit import type_utils, util
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.helpers import ConfigMerger
+from cloudinit.reporting.events import ReportEventStack
+from cloudinit.settings import FREQUENCIES
+from cloudinit.stages import Init
+
+LOG = logging.getLogger(__name__)
+
+# This prefix is used to reduce the chance that, when
+# importing, we will find something else with the same
+# name in the lookup path...
+MOD_PREFIX = "cc_"
+ModuleDetails = namedtuple(
+ "ModuleDetails", ["module", "name", "frequency", "run_args"]
+)
+
+
+def form_module_name(name):
+ canon_name = name.replace("-", "_")
+ if canon_name.lower().endswith(".py"):
+ canon_name = canon_name[0 : (len(canon_name) - 3)]
+ canon_name = canon_name.strip()
+ if not canon_name:
+ return None
+ if not canon_name.startswith(MOD_PREFIX):
+ canon_name = "%s%s" % (MOD_PREFIX, canon_name)
+ return canon_name
+
+
+def validate_module(mod, name):
+ if (
+ not hasattr(mod, "meta")
+ or "frequency" not in mod.meta
+ or "distros" not in mod.meta
+ ):
+ raise ValueError(
+ f"Module '{mod}' with name '{name}' MUST have a 'meta' attribute "
+ "of type 'MetaSchema'."
+ )
+ if mod.meta["frequency"] not in FREQUENCIES:
+ raise ValueError(
+ f"Module '{mod}' with name '{name}' has an invalid frequency "
+ f"{mod.meta['frequency']}."
+ )
+ if hasattr(mod, "schema"):
+ raise ValueError(
+ f"Module '{mod}' with name '{name}' has a JSON 'schema' attribute "
+ "defined. Please define schema in cloud-init-schema,json."
+ )
+
+
+class Modules(object):
+ def __init__(self, init: Init, cfg_files=None, reporter=None):
+ self.init = init
+ self.cfg_files = cfg_files
+ # Created on first use
+ self._cached_cfg = None
+ if reporter is None:
+ reporter = ReportEventStack(
+ name="module-reporter",
+ description="module-desc",
+ reporting_enabled=False,
+ )
+ self.reporter = reporter
+
+ @property
+ def cfg(self):
+ # None check to avoid empty case causing re-reading
+ if self._cached_cfg is None:
+ merger = ConfigMerger(
+ paths=self.init.paths,
+ datasource=self.init.datasource,
+ additional_fns=self.cfg_files,
+ base_cfg=self.init.cfg,
+ )
+ self._cached_cfg = merger.cfg
+ # Only give out a copy so that others can't modify this...
+ return copy.deepcopy(self._cached_cfg)
+
+ def _read_modules(self, name):
+ """Read the modules from the config file given the specified name.
+
+ Returns a list of module definitions. E.g.,
+ [
+ {
+ "mod": "bootcmd",
+ "freq": "always"
+ "args": "some_arg",
+ }
+ ]
+
+ Note that in the default case, only "mod" will be set.
+ """
+ module_list = []
+ if name not in self.cfg:
+ return module_list
+ cfg_mods = self.cfg.get(name)
+ if not cfg_mods:
+ return module_list
+ for item in cfg_mods:
+ if not item:
+ continue
+ if isinstance(item, str):
+ module_list.append(
+ {
+ "mod": item.strip(),
+ }
+ )
+ elif isinstance(item, list):
+ contents = {}
+ # Meant to fall through...
+ if len(item) >= 1:
+ contents["mod"] = item[0].strip()
+ if len(item) >= 2:
+ contents["freq"] = item[1].strip()
+ if len(item) >= 3:
+ contents["args"] = item[2:]
+ if contents:
+ module_list.append(contents)
+ elif isinstance(item, dict):
+ contents = {}
+ valid = False
+ if "name" in item:
+ contents["mod"] = item["name"].strip()
+ valid = True
+ if "frequency" in item:
+ contents["freq"] = item["frequency"].strip()
+ if "args" in item:
+ contents["args"] = item["args"] or []
+ if contents and valid:
+ module_list.append(contents)
+ else:
+ raise TypeError(
+ "Failed to read '%s' item in config, unknown type %s"
+ % (item, type_utils.obj_name(item))
+ )
+ return module_list
+
+ def _fixup_modules(self, raw_mods) -> List[ModuleDetails]:
+ """Convert list of returned from _read_modules() into new format.
+
+ Invalid modules and arguments are ignored.
+ Also ensures that the module has the required meta fields.
+ """
+ mostly_mods = []
+ for raw_mod in raw_mods:
+ raw_name = raw_mod["mod"]
+ freq = raw_mod.get("freq")
+ run_args = raw_mod.get("args") or []
+ mod_name = form_module_name(raw_name)
+ if not mod_name:
+ continue
+ if freq and freq not in FREQUENCIES:
+ LOG.warning(
+ "Config specified module %s has an unknown frequency %s",
+ raw_name,
+ freq,
+ )
+ # Misconfigured in /etc/cloud/cloud.cfg. Reset so cc_* module
+ # default meta attribute "frequency" value is used.
+ freq = None
+ mod_locs, looked_locs = importer.find_module(
+ mod_name, ["", type_utils.obj_name(config)], ["handle"]
+ )
+ if not mod_locs:
+ LOG.warning(
+ "Could not find module named %s (searched %s)",
+ mod_name,
+ looked_locs,
+ )
+ continue
+ mod = importer.import_module(mod_locs[0])
+ validate_module(mod, raw_name)
+ if freq is None:
+ # Use cc_* module default setting since no cloud.cfg overrides
+ freq = mod.meta["frequency"]
+ mostly_mods.append(
+ ModuleDetails(
+ module=mod,
+ name=raw_name,
+ frequency=freq,
+ run_args=run_args,
+ )
+ )
+ return mostly_mods
+
+ def _run_modules(self, mostly_mods: List[ModuleDetails]):
+ cc = self.init.cloudify()
+ # Return which ones ran
+ # and which ones failed, plus the exception that explains why
+ failures = []
+ which_ran = []
+ for (mod, name, freq, args) in mostly_mods:
+ try:
+ LOG.debug(
+ "Running module %s (%s) with frequency %s", name, mod, freq
+ )
+
+ # Use the config's logger and not our own
+ # TODO(harlowja): possibly check the module
+ # for having a LOG attr and just give it back
+ # its own logger?
+ func_args = [name, self.cfg, cc, LOG, args]
+ # Mark it as having started running
+ which_ran.append(name)
+ # This name will affect the semaphore name created
+ run_name = f"config-{name}"
+
+ desc = "running %s with frequency %s" % (run_name, freq)
+ myrep = ReportEventStack(
+ name=run_name, description=desc, parent=self.reporter
+ )
+
+ with myrep:
+ ran, _r = cc.run(
+ run_name, mod.handle, func_args, freq=freq
+ )
+ if ran:
+ myrep.message = "%s ran successfully" % run_name
+ else:
+ myrep.message = "%s previously ran" % run_name
+
+ except Exception as e:
+ util.logexc(LOG, "Running module %s (%s) failed", name, mod)
+ failures.append((name, e))
+ return (which_ran, failures)
+
+ def run_single(self, mod_name, args=None, freq=None):
+ # Form the user's module 'specs'
+ mod_to_be = {
+ "mod": mod_name,
+ "args": args,
+ "freq": freq,
+ }
+ # Now resume doing the normal fixups and running
+ raw_mods = [mod_to_be]
+ mostly_mods = self._fixup_modules(raw_mods)
+ return self._run_modules(mostly_mods)
+
+ def run_section(self, section_name):
+ """Runs all modules in the given section.
+
+ section_name - One of the module lists defined in
+ /etc/cloud/cloud.cfg. One of:
+ - cloud_init_modules
+ - cloud_config_modules
+ - cloud_final_modules
+ """
+ raw_mods = self._read_modules(section_name)
+ mostly_mods = self._fixup_modules(raw_mods)
+ distro_name = self.init.distro.name
+
+ skipped = []
+ forced = []
+ overridden = self.cfg.get("unverified_modules", [])
+ active_mods = []
+ for (mod, name, _freq, _args) in mostly_mods:
+ if mod is None:
+ continue
+ worked_distros = mod.meta["distros"]
+
+ # Skip only when the following conditions are all met:
+ # - distros are defined in the module != ALL_DISTROS
+ # - the current distro_name isn't in distros
+ # - and the module is unverified and not in the unverified_modules
+ # override list
+ if worked_distros and worked_distros != [ALL_DISTROS]:
+ if distro_name not in worked_distros:
+ if name not in overridden:
+ skipped.append(name)
+ continue
+ forced.append(name)
+ active_mods.append([mod, name, _freq, _args])
+
+ if skipped:
+ LOG.info(
+ "Skipping modules '%s' because they are not verified "
+ "on distro '%s'. To run anyway, add them to "
+ "'unverified_modules' in config.",
+ ",".join(skipped),
+ distro_name,
+ )
+ if forced:
+ LOG.info("running unverified_modules: '%s'", ", ".join(forced))
+
+ return self._run_modules(active_mods)
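For a concrete picture of the three entry shapes _read_modules() accepts, here is a minimal sketch of a module list and how it parses (the module names are real; the "some_arg" value is a hypothetical module argument, and PyYAML is assumed):

    import yaml

    # Bare name, [name, frequency, args...], and {name, frequency, args}
    # entries are all accepted; "some_arg" is hypothetical.
    section = yaml.safe_load("""
    cloud_config_modules:
      - bootcmd
      - [runcmd, always]
      - name: final-message
        frequency: once
        args: [some_arg]
    """)
    for item in section["cloud_config_modules"]:
        print(type(item).__name__, item)  # str, list, dict

form_module_name() then canonicalizes these to cc_bootcmd, cc_runcmd and cc_final_message, and a per-entry frequency overrides the module's meta["frequency"] only when it is one of the known FREQUENCIES.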
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 1f969c97..7a6ecf08 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -7,20 +7,24 @@ import logging
import os
import re
import sys
+import typing
from collections import defaultdict
from copy import deepcopy
from functools import partial
import yaml
-from cloudinit import importer
+from cloudinit import importer, safeyaml
from cloudinit.cmd.devel import read_cfg_paths
-from cloudinit.importer import MetaSchema
from cloudinit.util import error, find_modules, load_file
error = partial(error, sys_exit=True)
LOG = logging.getLogger(__name__)
+VERSIONED_USERDATA_SCHEMA_FILE = "versions.schema.cloud-config.json"
+# Bump this file when introducing incompatible schema changes.
+# Also add new version definition to versions.schema.cloud-config.json.
+USERDATA_SCHEMA_FILE = "schema-cloud-config-v1.json"
_YAML_MAP = {True: "true", False: "false", None: "null"}
CLOUD_CONFIG_HEADER = b"#cloud-config"
SCHEMA_DOC_TMPL = """
@@ -36,18 +40,39 @@ SCHEMA_DOC_TMPL = """
**Supported distros:** {distros}
-**Config schema**:
+{property_header}
{property_doc}
+
{examples}
"""
-SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}) {description}"
+SCHEMA_PROPERTY_HEADER = "**Config schema**:"
+SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}){description}"
SCHEMA_LIST_ITEM_TMPL = (
- "{prefix}Each item in **{prop_name}** list supports the following keys:"
+ "{prefix}Each object in **{prop_name}** list supports the following keys:"
)
-SCHEMA_EXAMPLES_HEADER = "\n**Examples**::\n\n"
+SCHEMA_EXAMPLES_HEADER = "**Examples**::\n\n"
SCHEMA_EXAMPLES_SPACER_TEMPLATE = "\n # --- Example{0} ---"
+# annotations add value for development, but don't break old versions
+# pyver: 3.6 -> 3.8
+# pylint: disable=E1101
+if sys.version_info >= (3, 8):
+
+ class MetaSchema(typing.TypedDict):
+ name: str
+ id: str
+ title: str
+ description: str
+ distros: typing.List[str]
+ examples: typing.List[str]
+ frequency: str
+
+else:
+ MetaSchema = dict
+# pylint: enable=E1101
+
+
class SchemaValidationError(ValueError):
"""Raised when validating a cloud-config file against a schema."""
@@ -215,7 +240,9 @@ def validate_cloudconfig_schema(
)
-def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
+def annotated_cloudconfig_file(
+ cloudconfig, original_content, schema_errors, schemamarks
+):
"""Return contents of the cloud-config file annotated with schema errors.
@param cloudconfig: YAML-loaded dict from the original_content or empty
@@ -226,7 +253,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
"""
if not schema_errors:
return original_content
- schemapaths = {}
errors_by_line = defaultdict(list)
error_footer = []
error_header = "# Errors: -------------\n{0}\n\n"
@@ -238,10 +264,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
lines
+ [error_header.format("# E1: Cloud-config is not a YAML dict.")]
)
- if cloudconfig:
- schemapaths = _schemapath_for_cloudconfig(
- cloudconfig, original_content
- )
for path, msg in schema_errors:
match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path)
if match:
@@ -249,7 +271,7 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
errors_by_line[int(line)].append(msg)
else:
col = None
- errors_by_line[schemapaths[path]].append(msg)
+ errors_by_line[schemamarks[path]].append(msg)
if col is not None:
msg = "Line {line} column {col}: {msg}".format(
line=line, col=col, msg=msg
@@ -310,10 +332,18 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
)
error = SchemaValidationError(errors)
if annotate:
- print(annotated_cloudconfig_file({}, content, error.schema_errors))
+ print(
+ annotated_cloudconfig_file(
+ {}, content, error.schema_errors, {}
+ )
+ )
raise error
try:
- cloudconfig = yaml.safe_load(content)
+ if annotate:
+ cloudconfig, marks = safeyaml.load_with_marks(content)
+ else:
+ cloudconfig = safeyaml.load(content)
+ marks = {}
except (yaml.YAMLError) as e:
line = column = 1
mark = None
@@ -332,7 +362,11 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
)
error = SchemaValidationError(errors)
if annotate:
- print(annotated_cloudconfig_file({}, content, error.schema_errors))
+ print(
+ annotated_cloudconfig_file(
+ {}, content, error.schema_errors, {}
+ )
+ )
raise error from e
if not isinstance(cloudconfig, dict):
# Return a meaningful message on empty cloud-config
@@ -344,103 +378,63 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
if annotate:
print(
annotated_cloudconfig_file(
- cloudconfig, content, e.schema_errors
+ cloudconfig, content, e.schema_errors, marks
)
)
raise
-def _schemapath_for_cloudconfig(config, original_content):
- """Return a dictionary mapping schemapath to original_content line number.
+def _sort_property_order(value):
+ """Provide a sorting weight for documentation of property types.
- @param config: The yaml.loaded config dictionary of a cloud-config file.
- @param original_content: The simple file content of the cloud-config file
+ Weight values ensure 'array' is sorted after 'object', which is
+ sorted after anything else (which remains unsorted).
"""
- # TODO( handle multi-line lists or multi-line strings, inline dicts)
- content_lines = original_content.decode().split("\n")
- schema_line_numbers = {}
- list_index = 0
- RE_YAML_INDENT = r"^(\s*)"
- scopes = []
- if not config:
- return {} # No YAML config dict, no schemapaths to annotate
- for line_number, line in enumerate(content_lines, 1):
- indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- if scopes:
- previous_depth, path_prefix = scopes[-1]
- else:
- previous_depth = -1
- path_prefix = ""
- if line.startswith("- "):
- # Process list items adding a list_index to the path prefix
- previous_list_idx = ".%d" % (list_index - 1)
- if path_prefix and path_prefix.endswith(previous_list_idx):
- path_prefix = path_prefix[: -len(previous_list_idx)]
- key = str(list_index)
- item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
- item_indent += 1 # For the leading '-' character
- previous_depth = indent_depth
- indent_depth += item_indent
- line = line[item_indent:] # Strip leading list item + whitespace
- list_index += 1
- else:
- # Process non-list lines setting value if present
- list_index = 0
- key, value = line.split(":", 1)
- if path_prefix and indent_depth > previous_depth:
- # Append any existing path_prefix for a fully-pathed key
- key = path_prefix + "." + key
- while indent_depth <= previous_depth:
- if scopes:
- previous_depth, path_prefix = scopes.pop()
- if list_index > 0 and indent_depth == previous_depth:
- path_prefix = ".".join(path_prefix.split(".")[:-1])
- break
- else:
- previous_depth = -1
- path_prefix = ""
- scopes.append((indent_depth, key))
- if value:
- value = value.strip()
- if value.startswith("["):
- scopes.append((indent_depth + 2, key + ".0"))
- for inner_list_index in range(0, len(yaml.safe_load(value))):
- list_key = key + "." + str(inner_list_index)
- schema_line_numbers[list_key] = line_number
- schema_line_numbers[key] = line_number
- return schema_line_numbers
-
-
-def _get_property_type(property_dict: dict) -> str:
+ if value == "array":
+ return 2
+ elif value == "object":
+ return 1
+ return 0
+
+
+def _get_property_type(property_dict: dict, defs: dict) -> str:
"""Return a string representing a property type from a given
jsonschema.
"""
- property_type = property_dict.get("type")
- if property_type is None:
- if property_dict.get("enum"):
- property_type = [
- str(_YAML_MAP.get(k, k)) for k in property_dict["enum"]
- ]
- elif property_dict.get("oneOf"):
- property_type = [
+ _flatten_schema_refs(property_dict, defs)
+ property_types = property_dict.get("type", [])
+ if not isinstance(property_types, list):
+ property_types = [property_types]
+ if property_dict.get("enum"):
+ property_types = [
+ f"``{_YAML_MAP.get(k, k)}``" for k in property_dict["enum"]
+ ]
+ elif property_dict.get("oneOf"):
+ property_types.extend(
+ [
subschema["type"]
for subschema in property_dict.get("oneOf")
if subschema.get("type")
]
- if isinstance(property_type, list):
- property_type = "/".join(property_type)
+ )
+ if len(property_types) == 1:
+ property_type = property_types[0]
+ else:
+ property_types.sort(key=_sort_property_order)
+ property_type = "/".join(property_types)
items = property_dict.get("items", {})
- sub_property_type = items.get("type", "")
+ sub_property_types = items.get("type", [])
+ if not isinstance(sub_property_types, list):
+ sub_property_types = [sub_property_types]
# Collect each item type
for sub_item in items.get("oneOf", {}):
- if sub_property_type:
- sub_property_type += "/"
- sub_property_type += "(" + _get_property_type(sub_item) + ")"
- if sub_property_type:
- return "{0} of {1}".format(property_type, sub_property_type)
+ sub_property_types.append(_get_property_type(sub_item, defs))
+ if sub_property_types:
+ if len(sub_property_types) == 1:
+ return f"{property_type} of {sub_property_types[0]}"
+ sub_property_types.sort(key=_sort_property_order)
+ sub_property_doc = f"({'/'.join(sub_property_types)})"
+ return f"{property_type} of {sub_property_doc}"
return property_type or "UNDEFINED"
@@ -466,23 +460,50 @@ def _parse_description(description, prefix) -> str:
return description
+def _flatten_schema_refs(src_cfg: dict, defs: dict):
+ """Flatten schema: replace $refs in src_cfg with definitions from $defs."""
+ if "$ref" in src_cfg:
+ reference = src_cfg.pop("$ref").replace("#/$defs/", "")
+ # Update the defined references in subschema for doc rendering
+ src_cfg.update(defs[reference])
+ if "items" in src_cfg:
+ if "$ref" in src_cfg["items"]:
+ reference = src_cfg["items"].pop("$ref").replace("#/$defs/", "")
+ # Update the references in subschema for doc rendering
+ src_cfg["items"].update(defs[reference])
+ if "oneOf" in src_cfg["items"]:
+ for alt_schema in src_cfg["items"]["oneOf"]:
+ if "$ref" in alt_schema:
+ reference = alt_schema.pop("$ref").replace("#/$defs/", "")
+ alt_schema.update(defs[reference])
+ for alt_schema in src_cfg.get("oneOf", []):
+ if "$ref" in alt_schema:
+ reference = alt_schema.pop("$ref").replace("#/$defs/", "")
+ alt_schema.update(defs[reference])
+
+
def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
"""Return restructured text describing the supported schema properties."""
new_prefix = prefix + " "
properties = []
+ if schema.get("hidden") is True:
+ return "" # no docs for this schema
property_keys = [
- schema.get("properties", {}),
- schema.get("patternProperties", {}),
+ key
+ for key in ("properties", "patternProperties")
+ if "hidden" not in schema or key not in schema["hidden"]
]
+ property_schemas = [schema.get(key, {}) for key in property_keys]
- for props in property_keys:
- for prop_key, prop_config in props.items():
- if "$ref" in prop_config:
- # Update the defined references in subschema for doc rendering
- ref = defs[prop_config["$ref"].replace("#/$defs/", "")]
- prop_config.update(ref)
+ for prop_schema in property_schemas:
+ for prop_key, prop_config in prop_schema.items():
+ _flatten_schema_refs(prop_config, defs)
+ if prop_config.get("hidden") is True:
+ continue # document nothing for this property
# Define prop_name and description for SCHEMA_PROPERTY_TMPL
description = prop_config.get("description", "")
+ if description:
+ description = " " + description
# Define label for SCHEMA_PROPERTY_TMPL
label = prop_config.get("label", prop_key)
@@ -491,21 +512,13 @@ def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
prefix=prefix,
prop_name=label,
description=_parse_description(description, prefix),
- prop_type=_get_property_type(prop_config),
+ prop_type=_get_property_type(prop_config, defs),
)
)
items = prop_config.get("items")
if items:
- if isinstance(items, list):
- for item in items:
- properties.append(
- _get_property_doc(
- item, defs=defs, prefix=new_prefix
- )
- )
- elif isinstance(items, dict) and (
- items.get("properties") or items.get("patternProperties")
- ):
+ _flatten_schema_refs(items, defs)
+ if items.get("properties") or items.get("patternProperties"):
properties.append(
SCHEMA_LIST_ITEM_TMPL.format(
prefix=new_prefix, prop_name=label
@@ -515,6 +528,21 @@ def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
properties.append(
_get_property_doc(items, defs=defs, prefix=new_prefix)
)
+ for alt_schema in items.get("oneOf", []):
+ if alt_schema.get("properties") or alt_schema.get(
+ "patternProperties"
+ ):
+ properties.append(
+ SCHEMA_LIST_ITEM_TMPL.format(
+ prefix=new_prefix, prop_name=label
+ )
+ )
+ new_prefix += " "
+ properties.append(
+ _get_property_doc(
+ alt_schema, defs=defs, prefix=new_prefix
+ )
+ )
if (
"properties" in prop_config
or "patternProperties" in prop_config
@@ -585,6 +613,7 @@ def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
# cast away type annotation
meta_copy = dict(deepcopy(meta))
+ meta_copy["property_header"] = ""
defs = schema.get("$defs", {})
if defs.get(meta["id"]):
schema = defs.get(meta["id"])
@@ -593,6 +622,8 @@ def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
except AttributeError:
LOG.warning("Unable to render property_doc due to invalid schema")
meta_copy["property_doc"] = ""
+ if meta_copy["property_doc"]:
+ meta_copy["property_header"] = SCHEMA_PROPERTY_HEADER
meta_copy["examples"] = _get_examples(meta)
meta_copy["distros"] = ", ".join(meta["distros"])
# Need an underbar of the same length as the name
@@ -632,11 +663,22 @@ def load_doc(requested_modules: list) -> str:
return docs
+def get_schema_dir() -> str:
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "schemas")
+
+
def get_schema() -> dict:
"""Return jsonschema coalesced from all cc_* cloud-config modules."""
- schema_file = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "cloud-init-schema.json"
- )
+ # Note versions.schema.json is publicly consumed by schemastore.org.
+ # If we change the location of versions.schema.json in github, we need
+ # to provide an updated PR to
+ # https://github.com/SchemaStore/schemastore.
+
+ # When bumping schema version due to incompatible changes:
+ # 1. Add a new schema-cloud-config-v#.json
+ # 2. Change USERDATA_SCHEMA_FILE to the new schema-cloud-config-v#.json
+ # 3. Add the new version definition to versions.schema.cloud-config.json
+ schema_file = os.path.join(get_schema_dir(), USERDATA_SCHEMA_FILE)
full_schema = None
try:
full_schema = json.loads(load_file(schema_file))
@@ -653,20 +695,6 @@ def get_schema() -> dict:
"$schema": "http://json-schema.org/draft-04/schema#",
"allOf": [],
}
-
- # TODO( Drop the get_modules loop when all legacy cc_* schema migrates )
- # Supplement base_schema with any legacy modules which still contain a
- # "schema" attribute. Legacy cc_* modules will be migrated to use the
- # store module schema in the composite cloud-init-schema-<version>.json
- # and will drop "schema" at that point.
- for (_, mod_name) in get_modules().items():
- # All cc_* modules need a "meta" attribute to represent schema defs
- (mod_locs, _) = importer.find_module(
- mod_name, ["cloudinit.config"], ["schema"]
- )
- if mod_locs:
- mod = importer.import_module(mod_locs[0])
- full_schema["allOf"].append(mod.schema)
return full_schema
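As a quick illustration of the new $ref flattening used throughout this patch, here is a minimal sketch of what _flatten_schema_refs() does to a property subschema (the def name and contents are made up for illustration):

    # Hypothetical defs table; "my_def" is not part of the real schema.
    defs = {"my_def": {"type": "string", "description": "A string."}}
    prop = {"$ref": "#/$defs/my_def"}

    # The $ref key is popped and the referenced definition is merged in,
    # so doc rendering and _get_property_type() see a plain subschema.
    reference = prop.pop("$ref").replace("#/$defs/", "")
    prop.update(defs[reference])
    assert prop == {"type": "string", "description": "A string."}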
diff --git a/cloudinit/config/schemas/__init__.py b/cloudinit/config/schemas/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/config/schemas/__init__.py
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
new file mode 100644
index 00000000..d409d5d6
--- /dev/null
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -0,0 +1,2273 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$defs": {
+ "users_groups.groups_by_groupname": {
+ "patternProperties": {
+ "^.+$": {
+ "label": "<group_name>",
+ "description": "Optional string of single username or a list of usernames to add to the group",
+ "type": ["string", "array"],
+ "items": {
+ "type": "string"
+ },
+ "minItems": 1
+ }
+ }
+ },
+ "users_groups.user": {
+ "oneOf": [
+ {"required": ["name"]},
+ {"required": ["snapuser"]}
+ ],
+ "properties": {
+ "name": {
+ "description": "The user's login name. Required otherwise user creation will be skipped for this user.",
+ "type": "string"
+ },
+ "expiredate": {
+ "default": null,
+ "description": "Optional. Date on which the user's account will be disabled. Default: ``null``",
+ "type": "string"
+ },
+ "gecos": {
+ "description": "Optional comment about the user, usually a comma-separated string of real name and contact information",
+ "type": "string"
+ },
+ "groups": {
+ "description": "Optional comma-separated string of groups to add the user to.",
+ "type": "string"
+ },
+ "homedir": {
+ "description": "Optional home dir for user. Default: ``/home/<username>``",
+ "default": "``/home/<username>``",
+ "type": "string"
+ },
+ "inactive": {
+ "description": "Optional string representing the number of days until the user is disabled. ",
+ "type": "string"
+ },
+ "lock_passwd": {
+ "default": true,
+ "description": "Disable password login. Default: ``true``",
+ "type": "boolean"
+ },
+ "no_create_home": {
+ "default": false,
+ "description": "Do not create home directory. Default: ``false``",
+ "type": "boolean"
+ },
+ "no_log_init": {
+ "default": false,
+ "description": "Do not initialize lastlog and faillog for user. Default: ``false``",
+ "type": "boolean"
+ },
+ "no_user_group": {
+ "default": false,
+ "description": "Do not create group named after user. Default: ``false``",
+ "type": "boolean"
+ },
+ "passwd": {
+ "description": "Hash of user password applied when user does not exist. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While hashed password is better than plain text, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.",
+ "type": "string"
+ },
+ "hashed_passwd": {
+ "description": "Hash of user password applied to new or existing users. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While ``hashed_password`` is better than ``plain_text_passwd``, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.",
+ "type": "string"
+ },
+ "plain_text_passwd": {
+ "description": "Clear text of user password applied to new or existing users. There are many more secure options than using plain text passwords, such as ``ssh_import_id`` or ``hashed_passwd``. Do not use this in production as user-data and your password can be exposed.",
+ "type": "string"
+ },
+ "create_groups": {
+ "default": true,
+ "description": "Boolean set ``false`` to disable creation of specified user ``groups``. Default: ``true``.",
+ "type": "boolean"
+ },
+ "primary_group": {
+ "default": "``<username>``",
+ "description": "Primary group for user. Default: ``<username>``",
+ "type": "string"
+ },
+ "selinux_user": {
+ "description": "SELinux user for user's login. Default to default SELinux user.",
+ "type": "string"
+ },
+ "shell": {
+ "description": "Path to the user's login shell. The default is to set no shell, which results in a system-specific default being used.",
+ "type": "string"
+ },
+ "snapuser": {
+ "description": " Specify an email address to create the user as a Snappy user through ``snap create-user``. If an Ubuntu SSO account is associated with the address, username and SSH keys will be requested from there.",
+ "type": "string"
+ },
+ "ssh_authorized_keys": {
+ "description": "List of SSH keys to add to user's authkeys file. Can not be combined with ``ssh_redirect_user``",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ },
+ "ssh_import_id": {
+ "description": "List of SSH IDs to import for user. Can not be combined with ``ssh_redirect_user``.",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ },
+ "ssh_redirect_user": {
+ "type": "boolean",
+ "default": false,
+ "description": "Boolean set to true to disable SSH logins for this user. When specified, all cloud meta-data public SSH keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the ``default_username`` for this instance. Default: ``false``. This key can not be combined with ``ssh_import_id`` or ``ssh_authorized_keys``."
+ },
+ "system": {
+ "description": "Optional. Create user as system user with no home directory. Default: ``false``.",
+ "type": "boolean",
+ "default": false
+ },
+ "sudo": {
+ "type": ["boolean", "string"],
+ "description": "Sudo rule to use or false. Absence of a sudo value or ``false`` will result in no sudo rules added for this user. DEPRECATED: the value ``false`` will be deprecated in the future release. Use ``null`` or no ``sudo`` key instead."
+ },
+ "uid": {
+ "description": "The user's ID. Default is next available value.",
+ "type": "integer"
+ }
+ },
+ "additionalProperties": false
+ },
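To see the users_groups.user def above in action, a minimal sketch that validates a single user entry (the username and import id are hypothetical; the (config, schema, strict) call shape of validate_cloudconfig_schema is assumed from this patch):

    import yaml
    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    # "name" (or "snapuser") is required by the oneOf above; the rest
    # of the keys are optional. alice / "gh:alice" are hypothetical.
    config = yaml.safe_load("""
    users:
      - name: alice
        lock_passwd: true
        ssh_import_id: ["gh:alice"]
    """)
    validate_cloudconfig_schema(config, get_schema(), strict=True)  # raises on error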
+ "apt_configure.mirror": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["arches"],
+ "properties": {
+ "arches": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ },
+ "uri": {"type": "string", "format": "uri"},
+ "search": {
+ "type": "array",
+ "items": {"type": "string", "format": "uri"},
+ "minItems": 1
+ },
+ "search_dns": {
+ "type": "boolean"
+ },
+ "keyid": {"type": "string"},
+ "key": {"type": "string"},
+ "keyserver": {"type": "string"}
+ }
+ },
+ "minItems": 1
+ },
+ "ca_certs.properties": {
+ "type": "object",
+ "properties": {
+ "remove-defaults": {
+ "description": "DEPRECATED. Use ``remove_defaults``. ",
+ "deprecated": true,
+ "type": "boolean",
+ "default": false
+ },
+ "remove_defaults": {
+ "description": "Remove default CA certificates if true. Default: false",
+ "type": "boolean",
+ "default": false
+ },
+ "trusted": {
+ "description": "List of trusted CA certificates to add.",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1
+ }
+ },
+ "additionalProperties": false,
+ "minProperties": 1
+ },
+ "cc_apk_configure": {
+ "type": "object",
+ "properties": {
+ "apk_repos": {
+ "type": "object",
+ "properties": {
+ "preserve_repositories": {
+ "type": "boolean",
+ "default": false,
+ "description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``."
+ },
+ "alpine_repo": {
+ "type": ["object", "null"],
+ "properties": {
+ "base_url": {
+ "type": "string",
+ "default": "https://alpine.global.ssl.fastly.net/alpine",
+ "description": "The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``https://alpine.global.ssl.fastly.net/alpine``"
+ },
+ "community_enabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to add the Community repo to the repositories file. By default the Community repo is not included."
+ },
+ "testing_enabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended to use the Testing repo on a machine running the ``Edge`` version of Alpine as packages installed from Testing may have dependencies that conflict with those in non-Edge Main or Community repos."
+ },
+ "version": {
+ "type": "string",
+ "description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)"
+ }
+ },
+ "required": ["version"],
+ "minProperties": 1,
+ "additionalProperties": false
+ },
+ "local_repo_base_url": {
+ "type": "string",
+ "description": "The base URL of an Alpine repository containing unofficial packages"
+ }
+ },
+ "minProperties": 1,
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_apt_configure": {
+ "properties": {
+ "apt": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "preserve_sources_list": {
+ "type": "boolean",
+ "default": false,
+ "description": "By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this behavior and preserve the sources list from the pristine image, set ``preserve_sources_list`` to ``true``.\n\nThe ``preserve_sources_list`` option overrides all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added to ``sources.list.d``."
+ },
+ "disable_suites": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "uniqueItems": true,
+ "description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for`` disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out."
+ },
+ "primary": {
+ "$ref": "#/$defs/apt_configure.mirror",
+ "description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``"
+ },
+ "security": {
+ "$ref": "#/$defs/apt_configure.mirror",
+ "description": "Please refer to the primary config documentation"
+ },
+ "add_apt_repo_match": {
+ "type": "string",
+ "default": "^[\\w-]+:\\w",
+ "description": "All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults to ``^[\\w-]+:\\w``"
+ },
+ "debconf_selections": {
+ "type": "object",
+ "minProperties": 1,
+ "patternProperties": {
+ "^.+$": {
+ "type": "string"
+ }
+ },
+ "description": "Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a different set of configurations. The value of each key must be a string containing all the debconf configurations that must be applied. We will bundle all of the values and pass them to ``debconf-set-selections``. Therefore, each value line must be a valid entry for ``debconf-set-selections``, meaning that they must possess for distinct fields:\n\n``pkgname question type answer``\n\nWhere:\n\n - ``pkgname`` is the name of the package.\n - ``question`` the name of the questions.\n - ``type`` is the type of question.\n - ``answer`` is the value used to answer the question.\n\nFor example: ``ippackage ippackage/ip string 127.0.01``"
+ },
+ "sources_list": {
+ "type": "string",
+ "description": "Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within this template, the following strings will be replaced with the appropriate values:\n\n - ``$MIRROR``\n - ``$RELEASE``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$KEY_FILE``"
+ },
+ "conf": {
+ "type": "string",
+ "description": "Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline APT configuration, make sure to follow yaml syntax."
+ },
+ "https_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify https APT proxy. https proxy url is specified in the format ``https://[[user][:pass]@]host[:port]/``."
+ },
+ "http_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify http APT proxy. http proxy url is specified in the format ``http://[[user][:pass]@]host[:port]/``."
+ },
+ "proxy": {
+ "type": "string",
+ "description": "Alias for defining a http APT proxy."
+ },
+ "ftp_proxy": {
+ "type": "string",
+ "description": "More convenient way to specify ftp APT proxy. ftp proxy url is specified in the format ``ftp://[[user][:pass]@]host[:port]/``."
+ },
+ "sources": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "type": "object",
+ "properties": {
+ "source": {
+ "type": "string"
+ },
+ "keyid": {
+ "type": "string"
+ },
+ "key": {
+ "type": "string"
+ },
+ "keyserver": {
+ "type": "string"
+ },
+ "filename": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false,
+ "minProperties": 1
+ }
+ },
+ "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``"
+ }
+ }
+ }
+ }
+ },
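A minimal apt sketch exercising the mirror and sources defs above, validated the same way as the users sketch earlier (the mirror URI is the documented default; the source name and entry are hypothetical):

    import yaml
    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    # "arches" is required for each entry under "primary"/"security".
    config = yaml.safe_load("""
    apt:
      preserve_sources_list: false
      primary:
        - arches: [default]
          uri: http://archive.ubuntu.com/ubuntu
      sources:
        my-repo.list:
          source: "deb $MIRROR $RELEASE multiverse"
    """)
    validate_cloudconfig_schema(config, get_schema(), strict=True)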
+ "cc_apt_pipelining": {
+ "type": "object",
+ "properties": {
+ "apt_pipelining": {
+ "oneOf": [
+ {"type": "integer"},
+ {"type": "boolean"},
+ {"type": "string", "enum": ["none", "unchanged", "os"]}
+ ]
+ }
+ }
+ },
+ "cc_bootcmd": {
+ "type": "object",
+ "properties": {
+ "bootcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"}
+ ]
+ },
+ "additionalItems": false,
+ "minItems": 1
+ }
+ }
+ },
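Each bootcmd item may be either a string (interpreted with sh) or a list of arguments (executed directly); a minimal sketch showing both forms with illustrative commands:

    import yaml

    config = yaml.safe_load("""
    bootcmd:
      - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts
      - [cloud-init-per, once, mymkfs, mkfs, /dev/vdb]
    """)
    for item in config["bootcmd"]:
        print(type(item).__name__, item)  # str, then list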
+ "cc_byobu": {
+ "type": "object",
+ "properties": {
+ "byobu_by_default": {
+ "type": "string",
+ "enum": [
+ "enable-system",
+ "enable-user",
+ "disable-system",
+ "disable-user",
+ "enable",
+ "disable",
+ "user",
+ "system"
+ ]
+ }
+ }
+ },
+ "cc_ca_certs": {
+ "type": "object",
+ "properties": {
+ "ca_certs": {
+ "$ref": "#/$defs/ca_certs.properties"
+ },
+ "ca-certs": {
+ "$ref": "#/$defs/ca_certs.properties"
+ }
+ }
+ },
+ "cc_chef": {
+ "type": "object",
+ "properties": {
+ "chef": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "directories": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "uniqueItems": true,
+ "description": "Create the necessary directories for chef to run. By default, it creates the following directories:\n\n - ``/etc/chef``\n - ``/var/log/chef``\n - ``/var/lib/chef``\n - ``/var/cache/chef``\n - ``/var/backups/chef``\n - ``/var/run/chef``"
+ },
+ "validation_cert": {
+ "type": "string",
+ "description": "Optional string to be written to file validation_key. Special value ``system`` means set use existing file."
+ },
+ "validation_key": {
+ "type": "string",
+ "default": "/etc/chef/validation.pem",
+ "description": "Optional path for validation_cert. default to ``/etc/chef/validation.pem``"
+ },
+ "firstboot_path": {
+ "type": "string",
+ "default": "/etc/chef/firstboot.json",
+ "description": "Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults to ``/etc/chef/firstboot.json``"
+ },
+ "exec": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set true if we should run or not run chef (defaults to false, unless a gem installed is requested where this will then default to true)."
+ },
+ "client_key": {
+ "type": "string",
+ "default": "/etc/chef/client.pem",
+ "description": "Optional path for client_cert. Default to ``/etc/chef/client.pem``."
+ },
+ "encrypted_data_bag_secret": {
+ "type": "string",
+ "default": null,
+ "description": "Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to null, meaning that chef will have to look at the path ``/etc/chef/encrypted_data_bag_secret`` for it."
+ },
+ "environment": {
+ "type": "string",
+ "default": "_default",
+ "description": "Specifies which environment chef will use. By default, it will use the ``_default`` configuration."
+ },
+ "file_backup_path": {
+ "type": "string",
+ "default": "/var/backups/chef",
+ "description": "Specifies the location in which backup files are stored. By default, it uses the ``/var/backups/chef`` location."
+ },
+ "file_cache_path": {
+ "type": "string",
+ "default": "/var/cache/chef",
+ "description": "Specifies the location in which chef cache files will be saved. By default, it uses the ``/var/cache/chef`` location."
+ },
+ "json_attribs": {
+ "type": "string",
+ "default": "/etc/chef/firstboot.json",
+ "description": "Specifies the location in which some chef json data is stored. By default, it uses the ``/etc/chef/firstboot.json`` location."
+ },
+ "log_level": {
+ "type": "string",
+ "default": ":info",
+ "description": "Defines the level of logging to be stored in the log file. By default this value is set to ``:info``."
+ },
+ "log_location": {
+ "type": "string",
+ "default": "/var/log/chef/client.log",
+ "description": "Specifies the location of the chef lof file. By default, the location is specified at ``/var/log/chef/client.log``."
+ },
+ "node_name": {
+ "type": "string",
+ "description": "The name of the node to run. By default, we will use th instance id as the node name."
+ },
+ "omnibus_url": {
+ "type": "string",
+ "default": "https://www.chef.io/chef/install.sh",
+ "description": "Omnibus URL if chef should be installed through Omnibus. By default, it uses the ``https://www.chef.io/chef/install.sh``."
+ },
+ "omnibus_url_retries": {
+ "type": "integer",
+ "default": 5,
+ "description": "The number of retries that will be attempted to reach the Omnibus URL. Default is 5."
+ },
+ "omnibus_version": {
+ "type": "string",
+ "description": "Optional version string to require for omnibus install."
+ },
+ "pid_file": {
+ "type": "string",
+ "default": "/var/run/chef/client.pid",
+ "description": "The location in which a process identification number (pid) is saved. By default, it saves in the ``/var/run/chef/client.pid`` location."
+ },
+ "server_url": {
+ "type": "string",
+ "description": "The URL for the chef server"
+ },
+ "show_time": {
+ "type": "boolean",
+ "default": true,
+ "description": "Show time in chef logs"
+ },
+ "ssl_verify_mode": {
+ "type": "string",
+ "default": ":verify_none",
+ "description": "Set the verify mode for HTTPS requests. We can have two possible values for this parameter:\n\n - ``:verify_none``: No validation of SSL certificates.\n - ``:verify_peer``: Validate all SSL certificates.\n\nBy default, the parameter is set as ``:verify_none``."
+ },
+ "validation_name": {
+ "type": "string",
+ "description": "The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during the initial Chef Infra Client run."
+ },
+ "force_install": {
+ "type": "boolean",
+ "default": false,
+ "description": "If set to ``true``, forces chef installation, even if it is already installed."
+ },
+ "initial_attributes": {
+ "type": "object",
+ "items": {"type": "string"},
+ "description": "Specify a list of initial attributes used by the cookbooks."
+ },
+ "install_type": {
+ "type": "string",
+ "default": "packages",
+ "enum": [
+ "packages",
+ "gems",
+ "omnibus"
+ ],
+ "description": "The type of installation for chef. It can be one of the following values:\n\n - ``packages``\n - ``gems``\n - ``omnibus``"
+ },
+ "run_list": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "A run list for a first boot json."
+ },
+ "chef_license": {
+ "type": "string",
+ "description": "string that indicates if user accepts or not license related to some of chef products"
+ }
+ }
+ }
+ }
+ },
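A minimal chef sketch against the def above (the server, node name and run list are hypothetical; keys left out fall back to the documented defaults), validated as in the earlier sketches:

    import yaml
    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    config = yaml.safe_load("""
    chef:
      install_type: omnibus
      server_url: https://chef.example.com:4000
      node_name: my-node
      run_list:
        - "recipe[apache2]"
        - "role[monitoring]"
      ssl_verify_mode: ":verify_peer"
    """)
    validate_cloudconfig_schema(config, get_schema(), strict=True)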
+ "cc_debug": {
+ "type": "object",
+ "properties": {
+ "debug": {
+ "additionalProperties": false,
+ "minProperties": 1,
+ "type": "object",
+ "properties": {
+ "verbose": {
+ "description": "Should always be true for this module",
+ "type": "boolean"
+ },
+ "output": {
+ "description": "Location to write output. Defaults to console + log",
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "cc_disable_ec2_metadata": {
+ "type": "object",
+ "properties": {
+ "disable_ec2_metadata": {
+ "default": false,
+ "description": "Set true to disable IPv4 routes to EC2 metadata. Default: false.",
+ "type": "boolean"
+ }
+ }
+ },
+ "cc_disk_setup": {
+ "type": "object",
+ "properties": {
+ "device_aliases": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<alias_name>",
+ "type": "string",
+ "description": "Path to disk to be aliased by this name."
+ }
+ }
+ },
+ "disk_setup": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<alias name/path>",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "table_type": {
+ "type": "string",
+ "default": "mbr",
+ "enum": ["mbr", "gpt"],
+ "description": "Specifies the partition table type, either ``mbr`` or ``gpt``. Default: ``mbr``."
+ },
+ "layout": {
+ "type": ["string", "boolean", "array"],
+ "default": false,
+ "oneOf": [
+ {"type": "string", "enum": ["remove"]},
+ {"type": "boolean"},
+ {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type": "array",
+ "items": {"type": "integer"},
+ "minItems": 2,
+ "maxItems": 2
+ }
+ ]
+ }
+ }
+ ],
+ "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). Default: ``false``."
+ },
+ "overwrite": {
+ "type": "boolean",
+ "default": false,
+ "description": "Controls whether this module tries to be safe about writing partition tables or not. If ``overwrite: false`` is set, the device will be checked for a partition table and for a file system and if either is found, the operation will be skipped. If ``overwrite: true`` is set, no checks will be performed. Using ``overwrite: true`` is **dangerous** and can lead to data loss, so double check that the correct device has been specified if using this option. Default: ``false``"
+ }
+ }
+ }
+ }
+ },
+ "fs_setup": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "label": {
+ "type": "string",
+ "description": "Label for the filesystem."
+ },
+ "filesystem": {
+ "type": "string",
+ "description": "Filesystem type to create. E.g., ``ext4`` or ``btrfs``"
+ },
+ "device": {
+ "type": "string",
+ "description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<device name>.<partition number>`` format, the value of ``partition`` will be overwritten."
+ },
+ "partition": {
+ "type": ["string", "integer"],
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": ["auto", "any", "none"]
+ }
+ ],
+ "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
+ },
+ "overwrite": {
+ "type": "boolean",
+ "description": "If ``true``, overwrite any existing filesystem. Using ``overwrite: true`` for filesystems is **dangerous** and can lead to data loss, so double check the entry in ``fs_setup``. Default: ``false``"
+ },
+ "replace_fs": {
+ "type": "string",
+ "description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``."
+ },
+ "extra_opts": {
+ "type": ["array", "string"],
+ "items": {"type": "string"},
+ "description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly."
+ },
+ "cmd": {
+ "type": ["array", "string"],
+ "items": {"type": "string"},
+ "description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. This is only necessary if you need to override the default command."
+ }
+ }
+ }
+ }
+ }
+ },
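
A minimal cloud-config sketch consistent with the ``cc_disk_setup`` definitions above (device names and the label are invented for illustration):

    #cloud-config
    disk_setup:
      /dev/sdb:
        layout: true       # one partition using all space on the device
        overwrite: false   # skip if a partition table or filesystem exists
    fs_setup:
      - label: data
        filesystem: ext4
        device: /dev/sdb1
        overwrite: false
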
+ "cc_fan": {
+ "type": "object",
+ "properties": {
+ "fan": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["config"],
+ "properties": {
+ "config": {
+ "type": "string",
+ "description": "The fan configuration to use as a single multi-line string"
+ },
+ "config_path": {
+ "type": "string",
+ "default": "/etc/network/fan",
+ "description": "The path to write the fan configuration to. Default: ``/etc/network/fan``"
+ }
+ }
+ }
+ }
+ },
+ "cc_final_message": {
+ "type": "object",
+ "properties": {
+ "final_message": {
+ "type": "string",
+ "description": "The message to display at the end of the run"
+ }
+ }
+ },
+ "cc_growpart": {
+ "type": "object",
+ "properties": {
+ "growpart": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "mode": {
+ "enum": [false, "auto", "growpart", "gpart", "off"],
+ "default": "auto",
+ "description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``off`` - Take no action\n\nSpecifying a boolean ``false`` value for this key is deprecated. Use ``off`` instead."
+ },
+ "devices": {
+ "type": "array",
+ "default": ["/"],
+ "items": {
+ "type": "string"
+ },
+ "description": "The devices to resize. Each entry can either be the path to the device's mountpoint in the filesystem or a path to the block device in '/dev'. Default: ``[/]``"
+ },
+ "ignore_growroot_disabled": {
+ "type": "boolean",
+ "default": false,
+ "description": "If ``true``, ignore the presence of ``/etc/growroot-disabled``. If ``false`` and the file exists, then don't resize. Default: ``false``"
+ }
+ }
+ }
+ }
+ },
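
For illustration, a cloud-config sketch the ``cc_growpart`` definition above accepts (the values shown are simply the schema defaults made explicit):

    #cloud-config
    growpart:
      mode: auto                      # use any available resize utility
      devices: ["/"]                  # mountpoint or /dev block-device paths
      ignore_growroot_disabled: false
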
+ "cc_grub_dpkg": {
+ "type": "object",
+ "properties": {
+ "grub_dpkg": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to configure which device is used as the target for grub installation. Default: ``true``"
+ },
+ "grub-pc/install_devices": {
+ "type": "string",
+ "description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device"
+ },
+ "grub-pc/install_devices_empty": {
+ "type": ["string", "boolean"],
+ "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``. Using a non-boolean value for this field is deprecated."
+ }
+ }
+ },
+ "grub-dpkg": {
+ "type": "object",
+ "description": "DEPRECATED: Use ``grub_dpkg`` instead"
+ }
+ }
+ },
+ "cc_install_hotplug": {
+ "type": "object",
+ "properties": {
+ "updates": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "network": {
+ "type": "object",
+ "required": ["when"],
+ "additionalProperties": false,
+ "properties": {
+ "when": {
+ "type": "array",
+ "additionalProperties": false,
+ "items": {
+ "type": "string",
+ "additionalProperties": false,
+ "enum": [
+ "boot-new-instance",
+ "boot-legacy",
+ "boot",
+ "hotplug"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
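
A minimal ``updates`` sketch matching the ``cc_install_hotplug`` definition above, enabling network updates on both boot and hotplug events:

    #cloud-config
    updates:
      network:
        when: ["boot", "hotplug"]
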
+ "cc_keyboard": {
+ "type": "object",
+ "properties": {
+ "keyboard": {
+ "type": "object",
+ "properties": {
+ "layout": {
+ "type": "string",
+ "description": "Required. Keyboard layout. Corresponds to XKBLAYOUT."
+ },
+ "model": {
+ "type": "string",
+ "default": "pc105",
+ "description": "Optional. Keyboard model. Corresponds to XKBMODEL. Default: ``pc105``."
+ },
+ "variant": {
+ "type": "string",
+ "description": "Optional. Keyboard variant. Corresponds to XKBVARIANT."
+ },
+ "options": {
+ "type": "string",
+ "description": "Optional. Keyboard options. Corresponds to XKBOPTIONS."
+ }
+ },
+ "required": ["layout"],
+ "additionalProperties": false
+ }
+ }
+ },
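
A ``keyboard`` sketch matching the definition above; the XKB values are examples, not recommendations:

    #cloud-config
    keyboard:
      layout: us            # required; corresponds to XKBLAYOUT
      model: pc105          # schema default
      variant: dvorak
      options: compose:rwin
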
+ "cc_keys_to_console": {
+ "type": "object",
+ "properties": {
+ "ssh": {
+ "type": "object",
+ "properties": {
+ "emit_keys_to_console": {
+ "type": "boolean",
+ "default": true,
+ "description": "Set false to avoid printing SSH keys to system console. Default: ``true``."
+ }
+ },
+ "additionalProperties": false,
+ "required": ["emit_keys_to_console"]
+ },
+ "ssh_key_console_blacklist": {
+ "type": "array",
+ "default": ["ssh-dss"],
+ "description": "Avoid printing matching SSH key types to the system console.",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+ "ssh_fp_console_blacklist": {
+ "type": "array",
+ "description": "Avoid printing matching SSH fingerprints to the system console.",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ }
+ }
+ },
+ "cc_landscape": {
+ "type": "object",
+ "properties": {
+ "landscape": {
+ "type": "object",
+ "required": ["client"],
+ "properties": {
+ "client": {
+ "type": "object",
+ "properties": {
+ "url": {
+ "type": "string",
+ "default": "https://landscape.canonical.com/message-system",
+ "description": "The Landscape server URL to connect to. Default: ``https://landscape.canonical.com/message-system``."
+ },
+ "ping_url": {
+ "type": "string",
+ "default": "https://landscape.canonical.com/ping",
+ "description": "The URL to perform lightweight exchange initiation with. Default: ``https://landscape.canonical.com/ping``."
+ },
+ "data_path": {
+ "type": "string",
+ "default": "/var/lib/landscape/client",
+ "description": "The directory to store data files in. Default: ``/var/lib/land‐scape/client/``."
+ },
+ "log_level": {
+ "type": "string",
+ "default": "info",
+ "enum": ["debug", "info", "warning", "error", "critical"],
+ "description": "The log level for the client. Default: ``info``."
+ },
+ "computer_tite": {
+ "type": "string",
+ "description": "The title of this computer."
+ },
+ "account_name": {
+ "type": "string",
+ "description": "The account this computer belongs to."
+ },
+ "registration_key": {
+ "type": "string",
+ "description": "The account-wide key used for registering clients."
+ },
+ "tags": {
+ "type": "string",
+ "pattern": "^[-_0-9a-zA-Z]+(,[-_0-9a-zA-Z]+)*$",
+ "description": "Comma separated list of tag names to be sent to the server."
+ },
+ "http_proxy": {
+ "type": "string",
+ "description": "The URL of the HTTP proxy, if one is needed."
+ },
+ "https_proxy": {
+ "type": "string",
+ "description": "The URL of the HTTPS proxy, if one is needed."
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_locale": {
+ "properties": {
+ "locale": {
+ "type": "string",
+ "description": "The locale to set as the system's locale (e.g. ar_PS)"
+ },
+ "locale_configfile": {
+ "type": "string",
+ "description": "The file in which to write the locale configuration (defaults to the distro's default location)"
+ }
+ }
+ },
+ "cc_lxd": {
+ "type": "object",
+ "properties": {
+ "lxd": {
+ "type": "object",
+ "minProperties": 1,
+ "properties": {
+ "init": {
+ "type": "object",
+ "properties": {
+ "network_address": {
+ "type": "string",
+ "description": "IP address for LXD to listen on"
+ },
+ "network_port": {
+ "type": "integer",
+ "description": "Network port to bind LXD to."
+ },
+ "storage_backend": {
+ "type": "string",
+ "enum": ["zfs", "dir"],
+ "default": "dir",
+ "description": "Storage backend to use. Default: ``dir``."
+ },
+ "storage_create_device": {
+ "type": "string",
+ "description": "Setup device based storage using DEVICE"
+ },
+ "storage_create_loop": {
+ "type": "integer",
+ "description": "Setup loop based storage with SIZE in GB"
+ },
+ "storage_pool": {
+ "type": "string",
+ "description": "Name of storage pool to use or create"
+ },
+ "trust_password": {
+ "type": "string",
+ "description": "The password required to add new clients"
+ }
+ }
+ },
+ "bridge": {
+ "type": "object",
+ "required": ["mode"],
+ "properties": {
+ "mode": {
+ "type": "string",
+ "description": "Whether to setup LXD bridge, use an existing bridge by ``name`` or create a new bridge. `none` will avoid bridge setup, `existing` will configure lxd to use the bring matching ``name`` and `new` will create a new bridge.",
+ "enum": ["none", "existing", "new"]
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the LXD network bridge to attach or create. Default: ``lxdbr0``.",
+ "default": "lxdbr0"
+ },
+ "ipv4_address": {
+ "type": "string",
+ "description": "IPv4 address for the bridge. If set, ``ipv4_netmask`` key required."
+ },
+ "ipv4_netmask": {
+ "type": "integer",
+ "description": "Prefix length for the ``ipv4_address`` key. Required when ``ipv4_address`` is set."
+ },
+ "ipv4_dhcp_first": {
+ "type": "string",
+ "description": "First IPv4 address of the DHCP range for the network created. This value will combined with ``ipv4_dhcp_last`` key to set LXC ``ipv4.dhcp.ranges``."
+ },
+ "ipv4_dhcp_last": {
+ "type": "string",
+ "description": "Last IPv4 address of the DHCP range for the network created. This value will combined with ``ipv4_dhcp_first`` key to set LXC ``ipv4.dhcp.ranges``."
+ },
+ "ipv4_dhcp_leases": {
+ "type": "integer",
+ "description": "Number of DHCP leases to allocate within the range. Automatically calculated based on `ipv4_dhcp_first` and `ipv4_dchp_last` when unset."
+ },
+ "ipv4_nat": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to NAT the IPv4 traffic allowing for a routed IPv4 network. Default: ``false``."
+ },
+ "ipv6_address": {
+ "type": "string",
+ "description": "IPv6 address for the bridge (CIDR notation). When set, ``ipv6_netmask`` key is required. When absent, no IPv6 will be configured."
+ },
+ "ipv6_netmask": {
+ "type": "integer",
+ "description": "Prefix length for ``ipv6_address`` provided. Required when ``ipv6_address`` is set."
+ },
+ "ipv6_nat": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to NAT. Default: ``false``."
+ },
+ "domain": {
+ "type": "string",
+ "description": "Domain to advertise to DHCP clients and use for DNS resolution."
+ }
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
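
A hedged ``lxd`` sketch consistent with the definition above (the bridge addressing is invented; note the schema requires ``ipv4_netmask`` whenever ``ipv4_address`` is set):

    #cloud-config
    lxd:
      init:
        storage_backend: dir
      bridge:
        mode: new
        name: lxdbr0
        ipv4_address: 10.0.8.1
        ipv4_netmask: 24
        ipv4_nat: true
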
+ "cc_mcollective": {
+ "type": "object",
+ "properties": {
+ "mcollective": {
+ "type": "object",
+ "properties": {
+ "conf": {
+ "type": "object",
+ "properties": {
+ "public-cert": {
+ "type": "string",
+ "description": "Optional value of server public certificate which will be written to ``/etc/mcollective/ssl/server-public.pem``"
+ },
+ "private-cert": {
+ "type": "string",
+ "description": "Optional value of server private certificate which will be written to ``/etc/mcollective/ssl/server-private.pem``"
+ }
+ },
+ "patternProperties": {
+ "^.+$": {
+ "description": "Optional config key: value pairs which will be appended to ``/etc/mcollective/server.cfg``.",
+ "oneOf": [
+ {"type": "boolean"},
+ {"type": "integer"},
+ {"type": "string"}
+ ]
+ }
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_migrator": {
+ "type": "object",
+ "properties": {
+ "migrate": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to migrate legacy cloud-init semaphores to new format. Default: ``true``"
+ }
+ }
+ },
+ "cc_mounts": {
+ "type": "object",
+ "properties": {
+ "mounts": {
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ "maxItems": 6
+ },
+ "description": "List of lists. Each inner list entry is a list of ``/etc/fstab`` mount declarations of the format: [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ]. A mount declaration with less than 6 items will get remaining values from ``mount_default_fields``. A mount declaration with only `fs_spec` and no `fs_file` mountpoint will be skipped.",
+ "minItems": 1
+ },
+ "mount_default_fields": {
+ "type": "array",
+ "description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.requires=cloud-init.service,_netdev``",
+ "default": [null, null, "auto", "defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"],
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "null"}
+ ]
+ },
+ "minItems": 6,
+ "maxItems": 6
+ },
+ "swap": {
+ "type": "object",
+ "properties": {
+ "filename": {
+ "type": "string",
+ "description": "Path to the swap file to create"
+ },
+ "size": {
+ "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format <float_size><units> where units are one of B, K, M, G or T.",
+ "oneOf": [
+ {"enum": ["auto"]},
+ {"type": "integer"},
+ {"type": "string", "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$"}
+ ]
+ },
+ "maxsize": {
+ "oneOf": [
+ {"type": "integer"},
+ {"type": "string", "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$"}
+ ],
+ "description": "The maxsize in bytes of the swap file"
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
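
A ``cc_mounts`` sketch matching the definitions above (the device and paths are invented; ``2G`` matches the size-abbreviation pattern):

    #cloud-config
    mounts:
      - ["/dev/vdb", "/mnt/data", "auto", "defaults,nofail"]
    swap:
      filename: /swap.img
      size: auto
      maxsize: 2G
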
+ "cc_ntp": {
+ "type": "object",
+ "properties": {
+ "ntp": {
+ "type": ["null", "object"],
+ "properties": {
+ "pools": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "hostname"
+ },
+ "uniqueItems": true,
+ "description": "List of ntp pools. If both pools and servers are\nempty, 4 default pool servers will be provided of\nthe format ``{0-3}.{distro}.pool.ntp.org``. NOTE:\nfor Alpine Linux when using the Busybox NTP client\nthis setting will be ignored due to the limited\nfunctionality of Busybox's ntpd."
+ },
+ "servers": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "hostname"
+ },
+ "uniqueItems": true,
+ "description": "List of ntp servers. If both pools and servers are\nempty, 4 default pool servers will be provided with\nthe format ``{0-3}.{distro}.pool.ntp.org``."
+ },
+ "ntp_client": {
+ "type": "string",
+ "default": "auto",
+ "description": "Name of an NTP client to use to configure system NTP.\nWhen unprovided or 'auto' the default client preferred\nby the distribution will be used. The following\nbuilt-in client names can be used to override existing\nconfiguration defaults: chrony, ntp, ntpdate,\nsystemd-timesyncd."
+ },
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "Attempt to enable ntp clients if set to True. If set\nto False, ntp client will not be configured or\ninstalled"
+ },
+ "config": {
+ "description": "Configuration settings or overrides for the\n``ntp_client`` specified.",
+ "type": "object",
+ "properties": {
+ "confpath": {
+ "type": "string",
+ "description": "The path to where the ``ntp_client``\nconfiguration is written."
+ },
+ "check_exe": {
+ "type": "string",
+ "description": "The executable name for the ``ntp_client``.\nFor example, ntp service ``check_exe`` is\n'ntpd' because it runs the ntpd binary."
+ },
+ "packages": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true,
+ "description": "List of packages needed to be installed for the\nselected ``ntp_client``."
+ },
+ "service_name": {
+ "type": "string",
+ "description": "The systemd or sysvinit service name used to\nstart and stop the ``ntp_client``\nservice."
+ },
+ "template": {
+ "type": "string",
+ "description": "Inline template allowing users to define their\nown ``ntp_client`` configuration template.\nThe value must start with '## template:jinja'\nto enable use of templating support.\n"
+ }
+ },
+ "minProperties": 1,
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
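
An ``ntp`` sketch matching the definition above; the pool hostnames are illustrative placeholders in the ``{0-3}.{distro}.pool.ntp.org`` style the description mentions:

    #cloud-config
    ntp:
      enabled: true
      ntp_client: chrony
      pools: [0.ubuntu.pool.ntp.org, 1.ubuntu.pool.ntp.org]
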
+ "cc_package_update_upgrade_install": {
+ "type": "object",
+ "properties": {
+ "packages": {
+ "type": "array",
+ "description": "A list of packages to install. Each entry in the list can be either a package name or a list with two entries, the first being the package name and the second being the specific package version to install.",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}, "minItems": 2, "maxItems": 2},
+ {"type": "string"}
+ ]
+ },
+ "minItems": 1
+ },
+ "package_update": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to update packages. Happens before upgrade or install. Default: ``false``"
+ },
+ "package_upgrade": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to upgrade packages. Happens before install. Default: ``false``"
+ },
+ "package_reboot_if_required": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. Default: ``false``"
+ },
+ "apt_update": {
+ "type": "boolean",
+ "default": false,
+ "description": "DEPRECATED. Use ``package_update``. Default: ``false``",
+ "deprecated": true
+ },
+ "apt_upgrade": {
+ "type": "boolean",
+ "default": false,
+ "description": "DEPRECATED. Use ``package_upgrade``. Default: ``false``",
+ "deprecated": true
+ },
+ "apt_reboot_if_required": {
+ "type": "boolean",
+ "default": false,
+ "description": "DEPRECATED. Use ``package_reboot_if_required``. Default: ``false``",
+ "deprecated": true
+ }
+ }
+ },
+ "cc_phone_home": {
+ "type": "object",
+ "properties": {
+ "phone_home": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["url"],
+ "properties": {
+ "url": {
+ "type": "string",
+ "format": "uri",
+ "description": "The URL to send the phone home data to."
+ },
+ "post": {
+ "description": "A list of keys to post or ``all``. Default: ``all``",
+ "oneOf": [
+ {"enum": ["all"]},
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "pub_key_dsa",
+ "pub_key_rsa",
+ "pub_key_ecdsa",
+ "pub_key_ed25519",
+ "instance_id",
+ "hostname",
+ "fqdn"
+ ]
+ }
+ }
+ ]
+ },
+ "tries": {
+ "type": "integer",
+ "description": "The number of times to try sending the phone home data. Default: ``10``",
+ "default": 10
+ }
+ }
+ }
+ }
+ },
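
A ``phone_home`` sketch matching the definition above (the URL is an invented placeholder):

    #cloud-config
    phone_home:
      url: http://example.com/boot-report
      post: [pub_key_rsa, instance_id, fqdn]
      tries: 5
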
+ "cc_power_state_change": {
+ "type": "object",
+ "properties": {
+ "power_state": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["mode"],
+ "properties": {
+ "delay": {
+ "description": "Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer specifying the number of minutes to delay. Default: ``now``",
+ "default": "now",
+ "oneOf": [
+ {"type": "integer", "minimum": 0},
+ {"type": "string", "pattern": "^\\+?[0-9]+$"},
+ {"enum": ["now"]}
+ ]
+ },
+ "mode": {
+ "description": "Must be one of ``poweroff``, ``halt``, or ``reboot``.",
+ "type": "string",
+ "enum": ["poweroff", "reboot", "halt"]
+ },
+ "message": {
+ "description": "Optional message to display to the user when the system is powering off or rebooting.",
+ "type": "string"
+ },
+ "timeout": {
+ "description": "Time in seconds to wait for the cloud-init process to finish before executing shutdown. Default: ``30``",
+ "type": "integer",
+ "default": 30
+ },
+ "condition": {
+ "description": "Apply state change only if condition is met. May be boolean true (always met), false (never met), or a command string or list to be executed. For command formatting, see the documentation for ``cc_runcmd``. If exit code is 0, condition is met, otherwise not. Default: ``true``",
+ "default": true,
+ "oneOf": [
+ {"type": "string"},
+ {"type": "boolean"},
+ {"type": "array"}
+ ]
+ }
+ }
+ }
+ }
+ },
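
A ``power_state`` sketch matching the definition above; only ``mode`` is required, the rest restate defaults:

    #cloud-config
    power_state:
      mode: reboot
      delay: now
      message: Rebooting after cloud-init completes
      timeout: 30
      condition: true
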
+ "cc_puppet": {
+ "type": "object",
+ "properties": {
+ "puppet": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "install": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether or not to install puppet. Setting to ``false`` will result in an error if puppet is not already present on the system. Default: ``true``"
+ },
+ "version": {
+ "type": "string",
+ "description": "Optional version to pass to the installer script or package manager. If unset, the latest version from the repos will be installed."
+ },
+ "install_type": {
+ "type": "string",
+ "description": "Valid values are ``packages`` and ``aio``. Agent packages from the puppetlabs repositories can be installed by setting ``aio``. Based on this setting, the default config/SSL/CSR paths will be adjusted accordingly. Default: ``packages``",
+ "enum": ["packages", "aio"],
+ "default": "packages"
+ },
+ "collection": {
+ "type": "string",
+ "description": "Puppet collection to install if ``install_type`` is ``aio``. This can be set to one of ``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly counterparts) in order to install specific release streams."
+ },
+ "aio_install_url": {
+ "type": "string",
+ "description": "If ``install_type`` is ``aio``, change the url of the install script."
+ },
+ "cleanup": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to remove the puppetlabs repo after installation if ``install_type`` is ``aio`` Default: ``true``"
+ },
+ "conf_file": {
+ "type": "string",
+ "description": "The path to the puppet config file. Default depends on ``install_type``"
+ },
+ "ssl_dir": {
+ "type": "string",
+ "description": "The path to the puppet SSL directory. Default depends on ``install_type``"
+ },
+ "csr_attributes_path": {
+ "type": "string",
+ "description": "The path to the puppet csr attributes file. Default depends on ``install_type``"
+ },
+ "package_name": {
+ "type": "string",
+ "description": "Name of the package to install if ``install_type`` is ``packages``. Default: ``puppet``"
+ },
+ "exec": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether or not to run puppet after configuration finishes. A single manual run can be triggered by setting ``exec`` to ``true``, and additional arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by default the agent will execute with the ``--test`` flag). Default: ``false``"
+ },
+ "exec_args": {
+ "type": "array",
+ "description": "A list of arguments to pass to 'puppet agent' if 'exec' is true Default: ``['--test']``",
+ "items": {
+ "type": "string"
+ }
+ },
+ "start_service": {
+ "type": "boolean",
+ "default": true,
+ "description": "By default, the puppet service will be automatically enabled after installation and set to automatically start on boot. To override this in favor of manual puppet execution set ``start_service`` to ``false``"
+ },
+ "conf": {
+ "type": "object",
+ "description": "Every key present in the conf object will be added to puppet.conf. As such, section names should be one of: ``main``, ``server``, ``agent`` or ``user`` and keys should be valid puppet configuration options. The configuration is specified as a dictionary containing high-level ``<section>`` keys and lists of ``<key>=<value>`` pairs within each section. The ``certname`` key supports string substitutions for ``%i`` and ``%f``, corresponding to the instance id and fqdn of the machine respectively.\n\n``ca_cert`` is a special case. It won't be added to puppet.conf. It holds the puppetserver certificate in pem format. It should be a multi-line string (using the | yaml notation for multi-line strings).",
+ "additionalProperties": false,
+ "properties": {
+ "main": {
+ "type": "object"
+ },
+ "server": {
+ "type": "object"
+ },
+ "agent": {
+ "type": "object"
+ },
+ "user": {
+ "type": "object"
+ },
+ "ca_cert": {
+ "type": "string"
+ }
+ }
+ },
+ "csr_attributes": {
+ "type": "object",
+ "description": "create a ``csr_attributes.yaml`` file for CSR attributes and certificate extension requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html",
+ "additionalProperties": false,
+ "properties": {
+ "custom_attributes": {
+ "type": "object"
+ },
+ "extension_requests": {
+ "type": "object"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
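
A ``puppet`` sketch matching the definition above, installing the AIO agent from a named collection and triggering one manual run:

    #cloud-config
    puppet:
      install: true
      install_type: aio
      collection: puppet7
      exec: true
      exec_args: ["--test"]
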
+ "cc_resizefs": {
+ "type": "object",
+ "properties": {
+ "resize_rootfs": {
+ "enum": [true, false, "noblock"],
+ "description": "Whether to resize the root partition. ``noblock`` will resize in the background. Default: ``true``"
+ }
+ }
+ },
+ "cc_resolv_conf": {
+ "type": "object",
+ "properties": {
+ "manage_resolv_conf": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to manage the resolv.conf file. ``resolv_conf`` block will be ignored unless this is set to ``true``. Default: ``false``"
+ },
+ "resolv_conf": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "nameservers": {
+ "type": "array",
+ "description": "A list of nameservers to use to be added as ``nameserver`` lines"
+ },
+ "searchdomains": {
+ "type": "array",
+ "description": "A list of domains to be added ``search`` line"
+ },
+ "domain": {
+ "type": "string",
+ "description": "The domain to be added as ``domain`` line"
+ },
+ "sortlist": {
+ "type": "array",
+ "description": "A list of IP addresses to be added to ``sortlist`` line"
+ },
+ "options": {
+ "type": "object",
+ "description": "Key/value pairs of options to go under ``options`` heading. A unary option should be specified as ``true``"
+ }
+ }
+ }
+ }
+ },
+ "cc_rh_subscription": {
+ "type": "object",
+ "properties": {
+ "rh_subscription": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "username": {
+ "type": "string",
+ "description": "The username to use. Must be used with password. Should not be used with ``activation-key`` or ``org``"
+ },
+ "password": {
+ "type": "string",
+ "description": "The password to use. Must be used with username. Should not be used with ``activation-key`` or ``org``"
+ },
+ "activation-key": {
+ "type": "string",
+ "description": "The activation key to use. Must be used with ``org``. Should not be used with ``username`` or ``password``"
+ },
+ "org": {
+ "type": "integer",
+ "description": "The organization number to use. Must be used with ``activation-key``. Should not be used with ``username`` or ``password``"
+ },
+ "auto-attach": {
+ "type": "boolean",
+ "description": "Whether to attach subscriptions automatically"
+ },
+ "service-level": {
+ "type": "string",
+ "description": "The service level to use when subscribing to RH repositories. ``auto-attach`` must be true for this to be used"
+ },
+ "add-pool": {
+ "type": "array",
+ "description": "A list of pools ids add to the subscription",
+ "items": {
+ "type": "string"
+ }
+ },
+ "enable-repo": {
+ "type": "array",
+ "description": "A list of repositories to enable",
+ "items": {
+ "type": "string"
+ }
+ },
+ "disable-repo": {
+ "type": "array",
+ "description": "A list of repositories to disable",
+ "items": {
+ "type": "string"
+ }
+ },
+ "rhsm-baseurl": {
+ "type": "string",
+ "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``"
+ },
+ "server-hostname": {
+ "type": "string",
+ "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``"
+ }
+ }
+ }
+ }
+ },
+ "cc_rsyslog": {
+ "type": "object",
+ "properties": {
+ "rsyslog": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "config_dir": {
+ "type": "string",
+ "description": "The directory where rsyslog configuration files will be written. Default: ``/etc/rsyslog.d``"
+ },
+ "config_filename": {
+ "type": "string",
+ "description": "The name of the rsyslog configuration file. Default: ``20-cloud-config.conf``"
+ },
+ "configs": {
+ "type": "array",
+ "description": "Each entry in ``configs`` is either a string or an object. Each config entry contains a configuration string and a file to write it to. For config entries that are an object, ``filename`` sets the target filename and ``content`` specifies the config string to write. For config entries that are only a string, the string is used as the config string to write. If the filename to write the config to is not specified, the value of the ``config_filename`` key is used. A file with the selected filename will be written inside the directory specified by ``config_dir``.",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "required": ["content"],
+ "properties": {
+ "filename": {
+ "type": "string"
+ },
+ "content": {
+ "type": "string"
+ }
+ }
+ }
+ ]
+ }
+ },
+ "remotes": {
+ "type": "object",
+ "description": "Each key is the name for an rsyslog remote entry. Each value holds the contents of the remote config for rsyslog. The config consists of the following parts:\n\n- filter for log messages (defaults to ``*.*``)\n\n- optional leading ``@`` or ``@@``, indicating udp and tcp respectively (defaults to ``@``, for udp)\n\n- ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]`` format, (e.g. ``@[fd00::1]:514``)\n\n- optional port number (defaults to ``514``)\n\nThis module will provide sane defaults for any part of the remote entry that is not specified, so in most cases remote hosts can be specified just using ``<name>: <address>``."
+ },
+ "service_reload_command": {
+ "description": "The command to use to reload the rsyslog service after the config has been updated. If this is set to ``auto``, then an appropriate command for the distro will be used. This is the default behavior. To manually set the command, use a list of command args (e.g. ``[systemctl, restart, rsyslog]``).",
+ "oneOf": [
+ {"enum": ["auto"]},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ }
+ }
+ }
+ }
+ },
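
An ``rsyslog`` sketch matching the definition above; addresses and filenames are invented, and the remote value follows the ``@``/``@@`` form the ``remotes`` description documents:

    #cloud-config
    rsyslog:
      configs:
        - "*.* @@192.168.1.10:514"
        - filename: 10-remote.conf
          content: "*.warn @10.0.4.1"
      remotes:
        log_serv: "@[fd00::1]:514"
      service_reload_command: auto
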
+ "cc_runcmd": {
+ "type": "object",
+ "properties": {
+ "runcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ {"type": "null"}
+ ]
+ },
+ "minItems": 1
+ }
+ }
+ },
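
A ``runcmd`` sketch matching the definition above, showing both item forms the schema allows (the output path is invented):

    #cloud-config
    runcmd:
      - [ls, -l, /]                            # list form
      - echo "runcmd works" > /tmp/runcmd.out  # string form, interpreted by a shell
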
+ "cc_salt_minion": {
+ "type": "object",
+ "properties": {
+ "salt_minion": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "pkg_name": {
+ "type": "string",
+ "description": "Package name to install. Default: ``salt-minion``"
+ },
+ "service_name": {
+ "type": "string",
+ "description": "Service name to enable. Default: ``salt-minion``"
+ },
+ "config_dir": {
+ "type": "string",
+ "description": "Directory to write config files to. Default: ``/etc/salt``"
+ },
+ "conf": {
+ "type": "object",
+ "description": "Configuration to be written to `config_dir`/minion"
+ },
+ "grains": {
+ "type": "object",
+ "description": "Configuration to be written to `config_dir`/grains"
+ },
+ "public_key": {
+ "type": "string",
+ "description": "Public key to be used by the salt minion"
+ },
+ "private_key": {
+ "type": "string",
+ "description": "Private key to be used by salt minion"
+ },
+ "pki_dir": {
+ "type": "string",
+ "description": "Directory to write key files. Default: `config_dir`/pki/minion"
+ }
+ }
+ }
+ }
+ },
+ "cc_scripts_vendor": {
+ "type": "object",
+ "properties": {
+ "vendor_data": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "enabled": {
+ "type": ["boolean", "string"],
+ "description": "Whether vendor data is enabled or not. Use of string for this value is DEPRECATED. Default: ``true``"
+ },
+ "prefix": {
+ "type": ["array", "string"],
+ "items": {"type": ["string", "integer"]},
+ "description": "The command to run before any vendor scripts. Its primary use case is for profiling a script, not to prevent its run"
+ }
+ }
+ }
+ }
+ },
+ "cc_seed_random": {
+ "type": "object",
+ "properties": {
+ "random_seed": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "file": {
+ "type": "string",
+ "default": "/dev/urandom",
+ "description": "File to write random data to. Default: ``/dev/urandom``"
+ },
+ "data": {
+ "type": "string",
+ "description": "This data will be written to ``file`` before data from the datasource. When using a multiline value or specifying binary data, be sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format specifiers when appropriate"
+ },
+ "encoding": {
+ "type": "string",
+ "default": "raw",
+ "enum": ["raw", "base64", "b64", "gzip", "gz"],
+ "description": "Used to decode ``data`` provided. Allowed values are ``raw``, ``base64``, ``b64``, ``gzip``, or ``gz``. Default: ``raw``"
+ },
+ "command": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "Execute this command to seed random. The command will have RANDOM_SEED_FILE in its environment set to the value of ``file`` above."
+ },
+ "command_required": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, and ``command`` is not available to be run then an exception is raised and cloud-init will record failure. Otherwise, only debug error is mentioned. Default: ``false``"
+ }
+ }
+ }
+ }
+ },
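
A ``random_seed`` sketch matching the definition above (the seed payload is an arbitrary example string):

    #cloud-config
    random_seed:
      file: /dev/urandom
      data: bXkgc2VlZCBkYXRh   # base64 of "my seed data"
      encoding: base64
      command_required: false
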
+ "cc_set_hostname": {
+ "type": "object",
+ "properties": {
+ "preserve_hostname": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, the hostname will not be changed. Default: ``false``"
+ },
+ "hostname": {
+ "type": "string",
+ "description": "The hostname to set"
+ },
+ "fqdn": {
+ "type": "string",
+ "description": "The fully qualified domain name to set"
+ },
+ "prefer_fqdn_over_hostname": {
+ "type": "boolean",
+ "description": "If true, the fqdn will be used if it is set. If false, the hostname will be used. If unset, the result is distro-dependent"
+ }
+ }
+ },
+ "cc_set_passwords": {
+ "type": "object",
+ "properties": {
+ "ssh_pwauth": {
+ "oneOf": [
+ {"type": "boolean"},
+ {"type": "string"}
+ ],
+ "description": "Sets whether or not to accept password authentication. ``true`` will enable password auth. ``false`` will disable. Default is to leave the value unchanged. Use of non-boolean values for this field is DEPRECATED and will result in an error in a future version of cloud-init."
+ },
+ "chpasswd": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "expire": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``"
+ },
+ "list": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "pattern": "^.+:.+$"
+ }}
+ ],
+ "minItems": 1,
+ "description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.\n\nUse of a multiline string for this field is DEPRECATED and will result in an error in a future version of cloud-init."
+ }
+ }
+ },
+ "password": {
+ "type": "string",
+ "description": "Set the default user's password. Ignored if ``chpasswd`` ``list`` is used"
+ }
+ }
+ },
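
A ``cc_set_passwords`` sketch matching the definitions above; the username is illustrative:

    #cloud-config
    ssh_pwauth: false
    chpasswd:
      expire: true
      list:
        - ubuntu:RANDOM   # RANDOM asks cloud-init to generate the password
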
+ "cc_snap": {
+ "type": "object",
+ "properties": {
+ "snap": {
+ "type": "object",
+ "additionalProperties": false,
+ "minProperties": 1,
+ "properties": {
+ "assertions": {
+ "type": ["object", "array"],
+ "description": "Properly-signed snap assertions which will run before and snap ``commands``.",
+ "items": {"type": "string"},
+ "additionalItems": false,
+ "minItems": 1,
+ "minProperties": 1,
+ "uniqueItems": true,
+ "additionalProperties": {"type": "string"}
+ },
+ "commands": {
+ "type": ["object", "array"],
+ "description": "Snap commands to run on the target system",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "additionalItems": false,
+ "minItems": 1,
+ "minProperties": 1,
+ "additionalProperties": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_spacewalk": {
+ "type": "object",
+ "properties": {
+ "spacewalk": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "server": {
+ "type": "string",
+ "description": "The Spacewalk server to use"
+ },
+ "proxy": {
+ "type": "string",
+ "description": "The proxy to use when connecting to Spacewalk"
+ },
+ "activation_key": {
+ "type": "string",
+ "description": "The activation key to use when registering with Spacewalk"
+ }
+ }
+ }
+ }
+ },
+ "cc_ssh_authkey_fingerprints": {
+ "type": "object",
+ "properties": {
+ "no_ssh_fingerprints": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, SSH fingerprints will not be written. Default: ``false``"
+ },
+ "authkey_hash": {
+ "type": "string",
+ "default": "sha256",
+ "description": "The hash type to use when generating SSH fingerprints. Default: ``sha256``"
+ }
+ }
+ },
+ "cc_ssh_import_id": {
+ "type": "object",
+ "properties": {
+ "ssh_import_id": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "description": "The SSH public key to import"
+ }
+ }
+ }
+ },
+ "cc_ssh": {
+ "type": "object",
+ "properties": {
+ "ssh_keys": {
+ "type": "object",
+ "description": "A dictionary entries for the public and private host keys of each desired key type. Entries in the ``ssh_keys`` config dict should have keys in the format ``<key type>_private``, ``<key type>_public``, and, optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``, ``rsa_public: <key>``, and ``rsa_certificate: <key>``. Not all key types have to be specified, ones left unspecified will not be used. If this config option is used, then separate keys will not be automatically generated. In order to specify multiline private host keys and certificates, use yaml multiline syntax.",
+ "patternProperties": {
+ "^(dsa|ecdsa|ed25519|rsa)_(public|private|certificate)$": {
+ "label": "<key_type>",
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ "ssh_authorized_keys": {
+ "type": "array",
+ "minItems": 1,
+ "description": "The SSH public keys to add ``.ssh/authorized_keys`` in the default user's home directory",
+ "items": {
+ "type": "string"
+ }
+ },
+ "ssh_deletekeys" : {
+ "type": "boolean",
+ "default": true,
+ "description": "Remove host SSH keys. This prevents re-use of a private host key from an image with default host SSH keys. Default: ``true``"
+ },
+ "ssh_genkeytypes": {
+ "type": "array",
+ "description": "The SSH key types to generate. Default: ``[rsa, dsa, ecdsa, ed25519]``",
+ "default": ["dsa", "ecdsa", "ed25519", "rsa"],
+ "minItems": 1,
+ "items": {
+ "type": "string",
+ "enum": ["dsa", "ecdsa", "ed25519", "rsa"]
+ }
+ },
+ "disable_root": {
+ "type": "boolean",
+ "default": true,
+ "description": "Disable root login. Default: ``true``"
+ },
+ "disable_root_opts": {
+ "type": "string",
+ "default": "``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``",
+ "description": "Disable root login options. If ``disable_root_opts`` is specified and contains the string ``$USER``, it will be replaced with the username of the default user. Default: ``no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo 'Please login as the user \\\"$USER\\\" rather than the user \\\"$DISABLE_USER\\\".';echo;sleep 10;exit 142\"``"
+ },
+ "allow_public_ssh_keys": {
+ "type": "boolean",
+ "default": true,
+ "description": "If ``true``, will import the public SSH keys from the datasource's metadata to the user's ``.ssh/authorized_keys`` file. Default: ``true``"
+ },
+ "ssh_quiet_keygen": {
+ "type": "boolean",
+ "default": false,
+ "description": "If ``true``, will suppress the output of key generation to the console. Default: ``false``"
+ },
+ "ssh_publish_hostkeys": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "If true, will read host keys from ``/etc/ssh/*.pub`` and publish them to the datasource (if supported). Default: ``true``"
+ },
+ "blacklist": {
+ "type": "array",
+ "description": "The SSH key types to ignore when publishing. Default: ``[dsa]``",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
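
A ``cc_ssh`` sketch matching the definitions above, restating the key-generation and host-key-publishing defaults explicitly:

    #cloud-config
    ssh_deletekeys: true
    ssh_genkeytypes: [ed25519, rsa]
    disable_root: true
    ssh_publish_hostkeys:
      enabled: true
      blacklist: [dsa]
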
+ "cc_timezone": {
+ "type": "object",
+ "properties": {
+ "timezone": {
+ "type": "string",
+ "description": "The timezone to use as represented in /usr/share/zoneinfo"
+ }
+ }
+ },
+ "cc_ubuntu_advantage": {
+ "type": "object",
+ "properties": {
+ "ubuntu_advantage": {
+ "type": "object",
+ "properties": {
+ "enable": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "Optional list of ubuntu-advantage services to enable. Any of: cc-eal, cis, esm-infra, fips, fips-updates, livepatch. By default, a given contract token will automatically enable a number of services, use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled."
+ },
+ "token": {
+ "type": "string",
+ "description": "Required contract token obtained from https://ubuntu.com/advantage to attach."
+ }
+ },
+ "required": ["token"],
+ "additionalProperties": false
+ }
+ }
+ },
+ "cc_ubuntu_drivers": {
+ "type": "object",
+ "properties": {
+ "drivers": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "nvidia": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "license-accepted"
+ ],
+ "properties": {
+ "license-accepted": {
+ "type": "boolean",
+ "description": "Do you accept the NVIDIA driver license?"
+ },
+ "version": {
+ "type": "string",
+ "description": "The version of the driver to install (e.g. \"390\", \"410\"). Defaults to the latest version."
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "cc_update_etc_hosts": {
+ "type": "object",
+ "properties": {
+ "manage_etc_hosts": {
+ "default": false,
+ "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``. DEPRECATED value ``template`` will be dropped, use ``true`` instead.",
+ "enum": [true, false, "template", "localhost"]
+ },
+ "fqdn": {
+ "type": "string",
+ "description": "Optional fully qualified domain name to use when updating ``/etc/hosts``. Preferred over ``hostname`` if both are provided. In absence of ``hostname`` and ``fqdn`` in cloud-config, the ``local-hostname`` value will be used from datasource metadata."
+ },
+ "hostname": {
+ "type": "string",
+ "description": "Hostname to set when rendering ``/etc/hosts``. If ``fqdn`` is set, the hostname extracted from ``fqdn`` overrides ``hostname``."
+ }
+ }
+ },
+ "cc_update_hostname": {
+ "type": "object",
+ "properties": {
+ "preserve_hostname": {
+ "type": "boolean",
+ "default": false,
+ "description": "Do not update system hostname when ``true``. Default: ``false``."
+ },
+ "prefer_fqdn_over_hostname": {
+ "type": "boolean",
+ "default": null,
+ "description": "By default, it is distro-dependent whether cloud-init uses the short hostname or fully qualified domain name when both ``local-hostname` and ``fqdn`` are both present in instance metadata. When set ``true``, use fully qualified domain name if present as hostname instead of short hostname. When set ``false``, use ``hostname`` config value if present, otherwise fallback to ``fqdn``."
+ }
+ }
+ },
+ "cc_users_groups": {
+ "type": "object",
+ "properties": {
+ "groups": {
+ "type": ["string", "object", "array"],
+ "hidden": ["patternProperties"],
+ "$ref": "#/$defs/users_groups.groups_by_groupname",
+ "items": {
+ "type": ["string", "object"],
+ "$ref": "#/$defs/users_groups.groups_by_groupname"
+ },
+ "minItems": 1
+ },
+ "user": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "object", "$ref": "#/$defs/users_groups.user"}
+ ],
+ "description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. The `user` dictionary keys supported for the default_user are the same as the ``users`` schema. DEPRECATED: string and types will be removed in a future release. Use ``users`` instead."
+ },
+ "users": {
+ "type": ["string", "array", "object"],
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "object", "$ref": "#/$defs/users_groups.user"}
+ ]
+ },
+ "minItems": 1
+ }
+ }
+ },
+ "cc_write_files": {
+ "type": "object",
+ "properties": {
+ "write_files": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "string",
+ "description": "Path of the file to which ``content`` is decoded and written"
+ },
+ "content": {
+ "type": "string",
+ "default": "",
+ "description": "Optional content to write to the provided ``path``. When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``"
+ },
+ "owner": {
+ "type": "string",
+ "default": "root:root",
+ "description": "Optional owner:group to chown on the file. Default: ``root:root``"
+ },
+ "permissions": {
+ "type": "string",
+ "default": "0o644",
+ "description": "Optional file permissions to set on ``path`` represented as an octal string '0###'. Default: ``0o644``"
+ },
+ "encoding": {
+ "type": "string",
+ "default": "text/plain",
+ "enum": ["gz", "gzip", "gz+base64", "gzip+base64", "gz+b64", "gzip+b64", "b64", "base64", "text/plain"],
+ "description": "Optional encoding type of the content. Default is ``text/plain`` and no content decoding is performed. Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, gzip+b64, b64, base64"
+ },
+ "append": {
+ "type": "boolean",
+ "default": false,
+ "description": "Whether to append ``content`` to existing file if ``path`` exists. Default: ``false``."
+ },
+ "defer": {
+ "type": "boolean",
+ "default": false,
+ "description": "Defer writing the file until 'final' stage, after users were created, and packages were installed. Default: ``false``."
+ }
+ },
+ "required": ["path"],
+ "additionalProperties": false
+ },
+ "minItems": 1
+ }
+ }
+ },
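
A ``write_files`` sketch matching the definition above (the path is invented; all other keys restate defaults except the encoded content):

    #cloud-config
    write_files:
      - path: /etc/example.conf
        content: aGVsbG8gd29ybGQK   # base64 of "hello world\n"
        encoding: b64
        owner: root:root
        permissions: "0644"
        append: false
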
+ "cc_yum_add_repo": {
+ "type": "object",
+ "properties": {
+ "yum_repo_dir": {
+ "type": "string",
+ "default": "/etc/yum.repos.d",
+ "description": "The repo parts directory where individual yum repo config files will be written. Default: ``/etc/yum.repos.d``"
+ },
+ "yum_repos": {
+ "type": "object",
+ "minProperties": 1,
+ "patternProperties": {
+ "^[0-9a-zA-Z -_]+$": {
+ "label": "<repo_name>",
+ "type": "object",
+ "description": "Object keyed on unique yum repo IDs. The key used will be used to write yum repo config files in ``yum_repo_dir``/<repo_key_id>.repo.",
+ "properties": {
+ "baseurl": {
+ "type": "string",
+ "format": "uri",
+ "description": "URL to the directory where the yum repository's 'repodata' directory lives"
+ },
+ "name": {
+ "type": "string",
+ "description": "Optional human-readable name of the yum repo."
+ },
+ "enabled": {
+ "type": "boolean",
+ "default": true,
+ "description": "Whether to enable the repo. Default: ``true``."
+ }
+ },
+ "patternProperties": {
+ "^[0-9a-zA-Z_]+$": {
+ "label": "<yum_config_option>",
+ "oneOf": [
+ {"type": "integer"},
+ {"type": "boolean"},
+ {"type": "string"}
+ ],
+ "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf"
+ }
+ },
+ "required": ["baseurl"]
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
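
A ``yum_repos`` sketch matching the definition above; the repo ID and URL are invented, and ``gpgcheck`` shows an arbitrary key falling through to the ``patternProperties`` branch:

    #cloud-config
    yum_repos:
      example-testing:
        baseurl: https://repo.example.com/el/8/$basearch
        name: Example testing repo
        enabled: false
        gpgcheck: true
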
+ "cc_zypper_add_repo": {
+ "type": "object",
+ "properties": {
+ "zypper": {
+ "type": "object",
+ "properties": {
+ "repos": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The unique id of the repo, used when writing /etc/zypp/repos.d/<id>.repo."
+ },
+ "baseurl": {
+ "type": "string",
+ "format": "uri",
+ "description": "The base repositoy URL"
+ }
+ },
+ "required": [
+ "id",
+ "baseurl"
+ ],
+ "additionalProperties": true
+ },
+ "minItems": 1
+ },
+ "config": {
+ "type": "object",
+ "description": "Any supported zypo.conf key is written to ``/etc/zypp/zypp.conf``"
+ }
+ },
+ "minProperties": 1,
+ "additionalProperties": false
+ }
+ }
+ }
+ },
+ "allOf": [
+ { "$ref": "#/$defs/cc_apk_configure" },
+ { "$ref": "#/$defs/cc_apt_configure" },
+ { "$ref": "#/$defs/cc_apt_pipelining" },
+ { "$ref": "#/$defs/cc_bootcmd" },
+ { "$ref": "#/$defs/cc_byobu" },
+ { "$ref": "#/$defs/cc_ca_certs" },
+ { "$ref": "#/$defs/cc_chef" },
+ { "$ref": "#/$defs/cc_debug" },
+ { "$ref": "#/$defs/cc_disable_ec2_metadata" },
+ { "$ref": "#/$defs/cc_disk_setup" },
+ { "$ref": "#/$defs/cc_fan" },
+ { "$ref": "#/$defs/cc_final_message"},
+ { "$ref": "#/$defs/cc_growpart"},
+ { "$ref": "#/$defs/cc_grub_dpkg"},
+ { "$ref": "#/$defs/cc_install_hotplug"},
+ { "$ref": "#/$defs/cc_keyboard" },
+ { "$ref": "#/$defs/cc_keys_to_console" },
+ { "$ref": "#/$defs/cc_landscape" },
+ { "$ref": "#/$defs/cc_locale" },
+ { "$ref": "#/$defs/cc_lxd" },
+ { "$ref": "#/$defs/cc_mcollective" },
+ { "$ref": "#/$defs/cc_migrator" },
+ { "$ref": "#/$defs/cc_mounts" },
+ { "$ref": "#/$defs/cc_ntp" },
+ { "$ref": "#/$defs/cc_package_update_upgrade_install" },
+ { "$ref": "#/$defs/cc_phone_home" },
+ { "$ref": "#/$defs/cc_power_state_change"},
+ { "$ref": "#/$defs/cc_puppet"},
+ { "$ref": "#/$defs/cc_resizefs"},
+ { "$ref": "#/$defs/cc_resolv_conf"},
+ { "$ref": "#/$defs/cc_rh_subscription"},
+ { "$ref": "#/$defs/cc_rsyslog"},
+ { "$ref": "#/$defs/cc_runcmd"},
+ { "$ref": "#/$defs/cc_salt_minion"},
+ { "$ref": "#/$defs/cc_scripts_vendor"},
+ { "$ref": "#/$defs/cc_seed_random"},
+ { "$ref": "#/$defs/cc_set_hostname"},
+ { "$ref": "#/$defs/cc_set_passwords"},
+ { "$ref": "#/$defs/cc_snap"},
+ { "$ref": "#/$defs/cc_spacewalk"},
+ { "$ref": "#/$defs/cc_ssh_authkey_fingerprints"},
+ { "$ref": "#/$defs/cc_ssh_import_id"},
+ { "$ref": "#/$defs/cc_ssh"},
+ { "$ref": "#/$defs/cc_timezone"},
+ { "$ref": "#/$defs/cc_ubuntu_advantage"},
+ { "$ref": "#/$defs/cc_ubuntu_drivers"},
+ { "$ref": "#/$defs/cc_update_etc_hosts"},
+ { "$ref": "#/$defs/cc_update_hostname"},
+ { "$ref": "#/$defs/cc_users_groups"},
+ { "$ref": "#/$defs/cc_write_files"},
+ { "$ref": "#/$defs/cc_yum_add_repo"},
+ { "$ref": "#/$defs/cc_zypper_add_repo"}
+ ]
+}
diff --git a/cloudinit/config/schemas/versions.schema.cloud-config.json b/cloudinit/config/schemas/versions.schema.cloud-config.json
new file mode 100644
index 00000000..4ff3b4d1
--- /dev/null
+++ b/cloudinit/config/schemas/versions.schema.cloud-config.json
@@ -0,0 +1,18 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$id": "https://raw.githubusercontent.com/canonical/cloud-init/main/cloudinit/config/schemas/versions.schema.cloud-config.json",
+ "oneOf": [
+ {
+ "allOf": [
+ {
+ "properties": {
+ "version": {
+ "enum": ["22.2", "v1"]
+ }
+ }
+ },
+ {"$ref": "./schema-cloud-config-v1.json"}
+ ]
+ }
+ ]
+}
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index f2d9de10..b034e2c8 100755..100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -253,9 +253,6 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
LOG.debug("Not bringing up newly configured network interfaces")
return False
- def apply_network_config_names(self, netconfig):
- net.apply_network_config_names(netconfig)
-
@abc.abstractmethod
def apply_locale(self, locale, out_fn=None):
raise NotImplementedError()
@@ -853,7 +850,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
args.append(message)
return args
- def manage_service(self, action, service):
+ def manage_service(self, action: str, service: str):
"""
Perform the requested action on a service. This handles the common
'systemctl' and 'service' cases and may be overridden in subclasses
@@ -870,6 +867,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
"restart": ["restart", service],
"reload": ["reload-or-restart", service],
"try-reload": ["reload-or-try-restart", service],
+ "status": ["status", service],
}
else:
cmds = {
@@ -879,6 +877,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
"restart": [service, "restart"],
"reload": [service, "restart"],
"try-reload": [service, "restart"],
+ "status": [service, "status"],
}
cmd = list(init_cmd) + list(cmds[action])
return subp.subp(cmd, capture=True)
@@ -1064,7 +1063,7 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
return default
-def fetch(name):
+def fetch(name) -> Type[Distro]:
locs, looked_locs = importer.find_module(name, ["", __name__], ["Distro"])
if not locs:
raise ImportError(
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index 1b4498b3..bab222b5 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -133,6 +133,3 @@ class BSD(distros.Distro):
def apply_locale(self, locale, out_fn=None):
LOG.debug("Cannot set the locale.")
-
- def apply_network_config_names(self, netconfig):
- LOG.debug("Cannot rename network interface.")
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 513abdc2..fa5c6616 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -13,6 +13,8 @@ from cloudinit import log as logging
from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
+from .networking import FreeBSDNetworking
+
LOG = logging.getLogger(__name__)
@@ -23,6 +25,7 @@ class Distro(cloudinit.distros.bsd.BSD):
(N.B. DragonFlyBSD inherits from this class.)
"""
+ networking_cls = FreeBSDNetworking
usr_lib_exec = "/usr/local/lib"
login_conf_fn = "/etc/login.conf"
login_conf_fn_bak = "/etc/login.conf.orig"
@@ -153,13 +156,6 @@ class Distro(cloudinit.distros.bsd.BSD):
LOG, "Failed to restore %s backup", self.login_conf_fn
)
- def apply_network_config_names(self, netconfig):
- # This is handled by the freebsd network renderer. It writes in
- # /etc/rc.conf a line with the following format:
- # ifconfig_OLDNAME_name=NEWNAME
- # FreeBSD network script will rename the interface automatically.
- pass
-
def _get_pkg_cmd_environ(self):
"""Return environment vars used in *BSD package_command operations"""
e = os.environ.copy()
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 4eb76da8..37217fe4 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -218,23 +218,26 @@ class Distro(distros.Distro):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
cmd = list("emerge")
# Redirect output
cmd.append("--quiet")
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
+ if command == "upgrade":
+ cmd.extend(["--update", "world"])
+ else:
+ if pkgs is None:
+ pkgs = []
+
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
- if command:
- cmd.append(command)
+ if command:
+ cmd.append(command)
- pkglist = util.expand_package_list("%s-%s", pkgs)
- cmd.extend(pkglist)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
+ cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
@@ -243,7 +246,7 @@ class Distro(distros.Distro):
self._runner.run(
"update-sources",
self.package_command,
- ["-u", "world"],
+ ["--sync"],
freq=PER_INSTANCE,
)
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index e37fb19b..8242c8f2 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -67,10 +67,7 @@
# }
# }
-from cloudinit.net.network_state import (
- mask_and_ipv4_to_bcast_addr,
- net_prefix_to_ipv4_mask,
-)
+from cloudinit.net import mask_and_ipv4_to_bcast_addr, net_prefix_to_ipv4_mask
def translate_network(settings):
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index 9c38ae51..c0d6390f 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -133,9 +133,6 @@ class NetBSD(cloudinit.distros.bsd.BSD):
def apply_locale(self, locale, out_fn=None):
LOG.debug("Cannot set the locale.")
- def apply_network_config_names(self, netconfig):
- LOG.debug("NetBSD cannot rename network interface.")
-
def _get_pkg_cmd_environ(self):
"""Return env vars used in NetBSD package_command operations"""
os_release = platform.release()
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
index b24b6233..f14d678d 100644
--- a/cloudinit/distros/networking.py
+++ b/cloudinit/distros/networking.py
@@ -19,7 +19,7 @@ class Networking(metaclass=abc.ABCMeta):
This is part of an ongoing refactor in the cloud-init codebase, for more
details see "``cloudinit.net`` -> ``cloudinit.distros.networking``
- Hierarchy" in HACKING.rst for full details.
+ Hierarchy" in CONTRIBUTING.rst for full details.
"""
def __init__(self):
@@ -31,8 +31,9 @@ class Networking(metaclass=abc.ABCMeta):
def _rename_interfaces(self, renames: list, *, current_info=None) -> None:
return net._rename_interfaces(renames, current_info=current_info)
+ @abc.abstractmethod
def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
- return net.apply_network_config_names(netcfg)
+ """Read the network config and rename devices accordingly."""
def device_devid(self, devname: DeviceName):
return net.device_devid(devname)
@@ -184,6 +185,9 @@ class Networking(metaclass=abc.ABCMeta):
class BSDNetworking(Networking):
"""Implementation of networking functionality shared across BSDs."""
+ def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
+ LOG.debug("Cannot rename network interface.")
+
def is_physical(self, devname: DeviceName) -> bool:
raise NotImplementedError()
@@ -194,9 +198,33 @@ class BSDNetworking(Networking):
raise NotImplementedError()
+class FreeBSDNetworking(BSDNetworking):
+ def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
+ # This is handled by the freebsd network renderer. It writes in
+ # /etc/rc.conf a line with the following format:
+ # ifconfig_OLDNAME_name=NEWNAME
+ # FreeBSD network script will rename the interface automatically.
+ pass
+
+
class LinuxNetworking(Networking):
"""Implementation of networking functionality common to Linux distros."""
+ def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
+ """Read the network config and rename devices accordingly.
+
+ Renames are only attempted for interfaces of type 'physical'. It is
+ expected that the network system will create other devices with the
+ correct name in place.
+ """
+
+ try:
+ self._rename_interfaces(self.extract_physdevs(netcfg))
+ except RuntimeError as e:
+ raise RuntimeError(
+ "Failed to apply network config names: %s" % e
+ ) from e
+
def get_dev_features(self, devname: DeviceName) -> str:
return net.get_dev_features(devname)
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 84744ece..320f4ba1 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -7,6 +7,7 @@
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
+import os
from cloudinit import distros, helpers
from cloudinit import log as logging
@@ -57,11 +58,25 @@ class Distro(distros.Distro):
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = "redhat"
+ self.default_locale = "en_US.UTF-8"
+ self.system_locale = None
cfg["ssh_svcname"] = "sshd"
def install_packages(self, pkglist):
self.package_command("install", pkgs=pkglist)
+ def get_locale(self):
+ """Return the default locale if set, else use system locale"""
+
+ # read system locale value
+ if not self.system_locale:
+ self.system_locale = self._read_system_locale()
+
+ # Return system_locale setting if valid, else use default locale
+ return (
+ self.system_locale if self.system_locale else self.default_locale
+ )
+
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
if not out_fn:
@@ -75,6 +90,23 @@ class Distro(distros.Distro):
}
rhel_util.update_sysconfig_file(out_fn, locale_cfg)
+ def _read_system_locale(self, keyname="LANG"):
+ """Read system default locale setting, if present"""
+ if self.uses_systemd():
+ locale_fn = self.systemd_locale_conf_fn
+ else:
+ locale_fn = self.locale_conf_fn
+
+ if not locale_fn:
+ raise ValueError("Invalid path: %s" % locale_fn)
+
+ if os.path.exists(locale_fn):
+ (_exists, contents) = rhel_util.read_sysconfig_file(locale_fn)
+ if keyname in contents:
+ return contents[keyname]
+ else:
+ return None
+
def _write_hostname(self, hostname, filename):
# systemd will never update previous-hostname for us, so
# we need to do it ourselves
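A minimal sketch of the lookup that get_locale() and _read_system_locale() perform, assuming a simple KEY=value parse in place of rhel_util.read_sysconfig_file; read_lang is a hypothetical helper and the path is the systemd case:

    # Minimal sketch: resolve the effective locale the way get_locale() does.
    import os

    DEFAULT_LOCALE = "en_US.UTF-8"

    def read_lang(locale_fn="/etc/locale.conf"):
        """Return the LANG value from a KEY=value file, or None."""
        if not os.path.exists(locale_fn):
            return None
        with open(locale_fn) as f:
            for line in f:
                key, _, value = line.strip().partition("=")
                if key == "LANG":
                    return value.strip('"')
        return None

    def get_locale():
        # System setting wins if present, else fall back to the default.
        return read_lang() or DEFAULT_LOCALE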
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index 72766392..e0a4d068 100755..100644
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -174,6 +174,10 @@ def normalize_users_groups(cfg, distro):
# Translate it into a format that will be more useful going forward
if isinstance(old_user, str):
old_user = {"name": old_user}
+ LOG.warning(
+ "DEPRECATED: 'user' of type string is deprecated and will"
+ " be removed in a future release. Use 'users' list instead."
+ )
elif not isinstance(old_user, dict):
LOG.warning(
"Format for 'user' key must be a string or dictionary"
@@ -201,9 +205,15 @@ def normalize_users_groups(cfg, distro):
default_user_config = util.mergemanydict([old_user, distro_user_config])
base_users = cfg.get("users", [])
- if not isinstance(base_users, (list, dict, str)):
+ if isinstance(base_users, (dict, str)):
+ LOG.warning(
+ "DEPRECATED: 'users' of type %s is deprecated and will be removed"
+ " in a future release. Use 'users' as a list.",
+ type(base_users),
+ )
+ elif not isinstance(base_users, list):
LOG.warning(
- "Format for 'users' key must be a comma separated string"
+ "Format for 'users' key must be a comma-separated string"
" or a dictionary or a list but found %s",
type_utils.obj_name(base_users),
)
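For reference, the 'user'/'users' shapes this hunk distinguishes, written out as Python dicts (the account names are made up); the string and non-list forms keep working but now log deprecation warnings:

    # Accepted 'user'/'users' shapes after this change (names are examples).
    deprecated_user = {"user": "ubuntu"}            # str: warns, coerced to dict
    preferred_user = {"user": {"name": "ubuntu"}}   # dict: no warning

    deprecated_users = {"users": "alice"}           # str/dict 'users': warns
    preferred_users = {"users": [{"name": "alice"}, {"name": "bob"}]}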
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 8daa5e37..c820f329 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -41,7 +41,7 @@ def dearmor(key):
note: man gpg(1) makes no mention of an --armour spelling, only --armor
"""
- return subp.subp(["gpg", "--dearmor"], data=key, decode=False)[0]
+ return subp.subp(["gpg", "--dearmor"], data=key, decode=False).stdout
def list(key_file, human_output=False):
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 7d8a9208..17755277 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -44,7 +44,6 @@ INCLUSION_TYPES_MAP = {
"#include-once": "text/x-include-once-url",
"#!": "text/x-shellscript",
"#cloud-config": "text/cloud-config",
- "#upstart-job": "text/upstart-job",
"#part-handler": "text/part-handler",
"#cloud-boothook": "text/cloud-boothook",
"#cloud-config-archive": "text/cloud-config-archive",
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
deleted file mode 100644
index 4bc95f97..00000000
--- a/cloudinit/handlers/upstart_job.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os
-import re
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import subp, util
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class UpstartJobPartHandler(handlers.Handler):
-
- prefixes = ["#upstart-job"]
-
- def __init__(self, paths, **_kwargs):
- handlers.Handler.__init__(self, PER_INSTANCE)
- self.upstart_dir = paths.upstart_conf_d
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- if ctype in handlers.CONTENT_SIGNALS:
- return
-
- # See: https://bugs.launchpad.net/bugs/819507
- if frequency != PER_INSTANCE:
- return
-
- if not self.upstart_dir:
- return
-
- filename = util.clean_filename(filename)
- (_name, ext) = os.path.splitext(filename)
- if not ext:
- ext = ""
- ext = ext.lower()
- if ext != ".conf":
- filename = filename + ".conf"
-
- payload = util.dos2unix(payload)
- path = os.path.join(self.upstart_dir, filename)
- util.write_file(path, payload, 0o644)
-
- if SUITABLE_UPSTART:
- subp.subp(["initctl", "reload-configuration"], capture=False)
-
-
-def _has_suitable_upstart():
- # (LP: #1124384)
- # a bug in upstart means that invoking reload-configuration
- # at this stage in boot causes havoc. So, try to determine if upstart
- # is installed, and reloading configuration is OK.
- if not os.path.exists("/sbin/initctl"):
- return False
- try:
- (version_out, _err) = subp.subp(["initctl", "version"])
- except Exception:
- util.logexc(LOG, "initctl version failed")
- return False
-
- # expecting 'initctl version' to output something like: init (upstart X.Y)
- if re.match("upstart 1.[0-7][)]", version_out):
- return False
- if "upstart 0." in version_out:
- return False
- elif "upstart 1.8" in version_out:
- if not os.path.exists("/usr/bin/dpkg-query"):
- return False
- try:
- (dpkg_ver, _err) = subp.subp(
- ["dpkg-query", "--showformat=${Version}", "--show", "upstart"],
- rcs=[0, 1],
- )
- except Exception:
- util.logexc(LOG, "dpkg-query failed")
- return False
-
- try:
- good = "1.8-0ubuntu1.2"
- subp.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
- return True
- except subp.ProcessExecutionError as e:
- if e.exit_code == 1:
- pass
- else:
- util.logexc(
- LOG, "dpkg --compare-versions failed [%s]", e.exit_code
- )
- except Exception:
- util.logexc(LOG, "dpkg --compare-versions failed")
- return False
- else:
- return True
-
-
-SUITABLE_UPSTART = _has_suitable_upstart()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index c2c9e584..d0db4b5b 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -260,7 +260,7 @@ class ConfigMerger(object):
)
return i_cfgs
- def _read_cfg(self):
+ def _read_cfg(self) -> dict:
# Input config files override
# env config files which
# override instance configs
@@ -337,7 +337,6 @@ class Paths(persistence.CloudInitPickleMixin):
self.run_dir = path_cfgs.get("run_dir", "/run/cloud-init")
self.instance_link = os.path.join(self.cloud_dir, "instance")
self.boot_finished = os.path.join(self.instance_link, "boot-finished")
- self.upstart_conf_d = path_cfgs.get("upstart_dir")
self.seed_dir = os.path.join(self.cloud_dir, "seed")
# This one isn't joined, since it should just be read-only
template_dir = path_cfgs.get("templates_dir", "/etc/cloud/templates/")
diff --git a/cloudinit/importer.py b/cloudinit/importer.py
index 2bc210dd..c9fa9dc5 100644
--- a/cloudinit/importer.py
+++ b/cloudinit/importer.py
@@ -9,25 +9,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import sys
-import typing
-
-# annotations add value for development, but don't break old versions
-# pyver: 3.6 -> 3.8
-# pylint: disable=E1101
-if sys.version_info >= (3, 8):
-
- class MetaSchema(typing.TypedDict):
- name: str
- id: str
- title: str
- description: str
- distros: typing.List[str]
- examples: typing.List[str]
- frequency: str
-
-else:
- MetaSchema = dict
-# pylint: enable=E1101
def import_module(module_name):
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 3270e1f7..3297a318 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -11,15 +11,21 @@ import ipaddress
import logging
import os
import re
-from typing import Any, Dict
+from typing import Any, Callable, Dict, List, Optional
+from urllib.parse import urlparse
from cloudinit import subp, util
-from cloudinit.net.network_state import ipv4_mask_to_net_prefix
from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
SYS_CLASS_NET = "/sys/class/net/"
DEFAULT_PRIMARY_INTERFACE = "eth0"
+IPV6_DYNAMIC_TYPES = [
+ "dhcp6",
+ "ipv6_slaac",
+ "ipv6_dhcpv6-stateless",
+ "ipv6_dhcpv6-stateful",
+]
OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [
"ovs-vsctl",
"--format",
@@ -389,8 +395,25 @@ def is_disabled_cfg(cfg):
return cfg.get("config") == "disabled"
-def find_fallback_nic(blacklist_drivers=None):
- """Return the name of the 'fallback' network device."""
+def find_candidate_nics(
+ blacklist_drivers: Optional[List[str]] = None,
+) -> List[str]:
+ """Get the list of network interfaces viable for networking.
+
+ @return List of interfaces, sorted naturally.
+ """
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
+ return find_candidate_nics_on_freebsd(blacklist_drivers)
+ elif util.is_NetBSD() or util.is_OpenBSD():
+ return find_candidate_nics_on_netbsd_or_openbsd(blacklist_drivers)
+ else:
+ return find_candidate_nics_on_linux(blacklist_drivers)
+
+
+def find_fallback_nic(
+ blacklist_drivers: Optional[List[str]] = None,
+) -> Optional[str]:
+ """Get the name of the 'fallback' network device."""
if util.is_FreeBSD() or util.is_DragonFlyBSD():
return find_fallback_nic_on_freebsd(blacklist_drivers)
elif util.is_NetBSD() or util.is_OpenBSD():
@@ -399,37 +422,73 @@ def find_fallback_nic(blacklist_drivers=None):
return find_fallback_nic_on_linux(blacklist_drivers)
-def find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers=None):
- values = list(
- sorted(get_interfaces_by_mac().values(), key=natural_sort_key)
- )
- if values:
- return values[0]
+def find_candidate_nics_on_netbsd_or_openbsd(
+ blacklist_drivers: Optional[List[str]] = None,
+) -> List[str]:
+ """Get the names of the candidate network devices on NetBSD/OpenBSD.
+
+ @param blacklist_drivers: currently ignored
+ @return list of sorted interfaces
+ """
+ return sorted(get_interfaces_by_mac().values(), key=natural_sort_key)
-def find_fallback_nic_on_freebsd(blacklist_drivers=None):
- """Return the name of the 'fallback' network device on FreeBSD.
+def find_fallback_nic_on_netbsd_or_openbsd(
+ blacklist_drivers: Optional[List[str]] = None,
+) -> Optional[str]:
+ """Get the 'fallback' network device name on NetBSD/OpenBSD.
@param blacklist_drivers: currently ignored
@return default interface, or None
+ """
+ names = find_candidate_nics_on_netbsd_or_openbsd(blacklist_drivers)
+ if names:
+ return names[0]
+
+ return None
- we'll use the first interface from ``ifconfig -l -u ether``
+def find_candidate_nics_on_freebsd(
+ blacklist_drivers: Optional[List[str]] = None,
+) -> List[str]:
+ """Get the names of the candidate network devices on FreeBSD.
+
+ @param blacklist_drivers: Currently ignored.
+ @return List of sorted interfaces.
"""
stdout, _stderr = subp.subp(["ifconfig", "-l", "-u", "ether"])
values = stdout.split()
if values:
- return values[0]
+ return values
+
# On FreeBSD <= 10, 'ifconfig -l' ignores the interfaces with DOWN
# status
- values = list(get_interfaces_by_mac().values())
- values.sort()
- if values:
- return values[0]
+ return sorted(get_interfaces_by_mac().values(), key=natural_sort_key)
+
+
+def find_fallback_nic_on_freebsd(
+ blacklist_drivers: Optional[List[str]] = None,
+) -> Optional[str]:
+ """Get the 'fallback' network device name on FreeBSD.
+
+ @param blacklist_drivers: Currently ignored.
+ @return Default interface, or None.
+ """
+ names = find_candidate_nics_on_freebsd(blacklist_drivers)
+ if names:
+ return names[0]
+
+ return None
+
+def find_candidate_nics_on_linux(
+ blacklist_drivers: Optional[List[str]] = None,
+) -> List[str]:
+ """Get the names of the candidate network devices on Linux.
-def find_fallback_nic_on_linux(blacklist_drivers=None):
- """Return the name of the 'fallback' network device on Linux."""
+ @param blacklist_drivers: Filter out NICs with these drivers.
+ @return List of sorted interfaces.
+ """
if not blacklist_drivers:
blacklist_drivers = []
@@ -449,36 +508,39 @@ def find_fallback_nic_on_linux(blacklist_drivers=None):
msg = "Waiting for udev events to settle"
util.log_time(LOG.debug, msg, func=util.udevadm_settle)
- # get list of interfaces that could have connections
- invalid_interfaces = set(["lo"])
- potential_interfaces = set(
- [
- device
- for device in get_devicelist()
- if device_driver(device) not in blacklist_drivers
- ]
- )
- potential_interfaces = potential_interfaces.difference(invalid_interfaces)
# sort into interfaces with carrier, interfaces which could have carrier,
# and ignore interfaces that are definitely disconnected
connected = []
possibly_connected = []
- for interface in potential_interfaces:
+ for interface in get_devicelist():
+ if interface == "lo":
+ continue
+ driver = device_driver(interface)
+ if driver in blacklist_drivers:
+ LOG.debug(
+ "Ignoring interface with %s driver: %s", driver, interface
+ )
+ continue
+ if not read_sys_net_safe(interface, "address"):
+ LOG.debug("Ignoring interface without mac: %s", interface)
+ continue
if interface.startswith("veth"):
+ LOG.debug("Ignoring veth interface: %s", interface)
continue
if is_bridge(interface):
- # skip any bridges
+ LOG.debug("Ignoring bridge interface: %s", interface)
continue
if is_bond(interface):
- # skip any bonds
+ LOG.debug("Ignoring bond interface: %s", interface)
continue
if is_netfailover(interface):
- # ignore netfailover primary/standby interfaces
+ LOG.debug("Ignoring failover interface: %s", interface)
continue
carrier = read_sys_net_int(interface, "carrier")
if carrier:
connected.append(interface)
continue
+ LOG.debug("Interface has no carrier: %s", interface)
# check if nic is dormant or down, as this may make a NIC appear to
# not have a carrier even though it could acquire one when brought
# online by dhclient
@@ -491,24 +553,36 @@ def find_fallback_nic_on_linux(blacklist_drivers=None):
possibly_connected.append(interface)
continue
- # don't bother with interfaces that might not be connected if there are
- # some that definitely are
- if connected:
- potential_interfaces = connected
- else:
- potential_interfaces = possibly_connected
-
- # if eth0 exists use it above anything else, otherwise get the interface
- # that we can read 'first' (using the sorted definition of first).
- names = list(sorted(potential_interfaces, key=natural_sort_key))
- if DEFAULT_PRIMARY_INTERFACE in names:
- names.remove(DEFAULT_PRIMARY_INTERFACE)
- names.insert(0, DEFAULT_PRIMARY_INTERFACE)
-
- # pick the first that has a mac-address
- for name in names:
- if read_sys_net_safe(name, "address"):
- return name
+ LOG.debug("Interface ignored: %s", interface)
+
+ # Order the NICs:
+ # 1. DEFAULT_PRIMARY_INTERFACE, if connected.
+ # 2. Remaining connected interfaces, naturally sorted.
+ # 3. DEFAULT_PRIMARY_INTERFACE, if possibly connected.
+ # 4. Remaining possibly connected interfaces, naturally sorted.
+ sorted_interfaces = []
+ for interfaces in [connected, possibly_connected]:
+ interfaces = sorted(interfaces, key=natural_sort_key)
+ if DEFAULT_PRIMARY_INTERFACE in interfaces:
+ interfaces.remove(DEFAULT_PRIMARY_INTERFACE)
+ interfaces.insert(0, DEFAULT_PRIMARY_INTERFACE)
+ sorted_interfaces += interfaces
+
+ return sorted_interfaces
+
+
+def find_fallback_nic_on_linux(
+ blacklist_drivers: Optional[List[str]] = None,
+) -> Optional[str]:
+ """Get the 'fallback' network device name on Linux.
+
+ @param blacklist_drivers: Ignore devices with these drivers.
+ @return Default interface, or None.
+ """
+ names = find_candidate_nics_on_linux(blacklist_drivers)
+ if names:
+ return names[0]
+
return None
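A standalone illustration of the ordering contract in the comment block above (connected interfaces first, eth0 promoted within each group); order_nics is a hypothetical helper, while natural_sort_key mirrors the cloud-init implementation:

    # Standalone sketch of the candidate-NIC ordering rules.
    import re

    DEFAULT_PRIMARY_INTERFACE = "eth0"

    def natural_sort_key(s, _nsre=re.compile("([0-9]+)")):
        return [int(t) if t.isdigit() else t.lower() for t in _nsre.split(s)]

    def order_nics(connected, possibly_connected):
        ordered = []
        for group in (connected, possibly_connected):
            group = sorted(group, key=natural_sort_key)
            if DEFAULT_PRIMARY_INTERFACE in group:
                # eth0 always jumps to the front of its group.
                group.remove(DEFAULT_PRIMARY_INTERFACE)
                group.insert(0, DEFAULT_PRIMARY_INTERFACE)
            ordered += group
        return ordered

    # order_nics(["eth10", "eth2", "eth0"], ["ens3"])
    # -> ["eth0", "eth2", "eth10", "ens3"]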
@@ -586,24 +660,6 @@ def extract_physdevs(netcfg):
raise RuntimeError("Unknown network config version: %s" % version)
-def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
- """read the network config and rename devices accordingly.
- if strict_present is false, then do not raise exception if no devices
- match. if strict_busy is false, then do not raise exception if the
- device cannot be renamed because it is currently configured.
-
- renames are only attempted for interfaces of type 'physical'. It is
- expected that the network system will create other devices with the
- correct name in place."""
-
- try:
- _rename_interfaces(extract_physdevs(netcfg))
- except RuntimeError as e:
- raise RuntimeError(
- "Failed to apply network config names: %s" % e
- ) from e
-
-
def interface_has_own_mac(ifname, strict=False):
"""return True if the provided interface has its own address.
@@ -872,7 +928,7 @@ def get_interfaces_by_mac(blacklist_drivers=None) -> dict:
)
-def get_interfaces_by_mac_on_freebsd(blacklist_drivers=None) -> dict():
+def get_interfaces_by_mac_on_freebsd(blacklist_drivers=None) -> dict:
(out, _) = subp.subp(["ifconfig", "-a", "ether"])
# flatten each interface block in a single line
@@ -900,7 +956,7 @@ def get_interfaces_by_mac_on_freebsd(blacklist_drivers=None) -> dict():
return results
-def get_interfaces_by_mac_on_netbsd(blacklist_drivers=None) -> dict():
+def get_interfaces_by_mac_on_netbsd(blacklist_drivers=None) -> dict:
ret = {}
re_field_match = (
r"(?P<ifname>\w+).*address:\s"
@@ -916,7 +972,7 @@ def get_interfaces_by_mac_on_netbsd(blacklist_drivers=None) -> dict():
return ret
-def get_interfaces_by_mac_on_openbsd(blacklist_drivers=None) -> dict():
+def get_interfaces_by_mac_on_openbsd(blacklist_drivers=None) -> dict:
ret = {}
re_field_match = (
r"(?P<ifname>\w+).*lladdr\s"
@@ -946,16 +1002,42 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
% (name, ret[mac], mac)
)
ret[mac] = name
- # Try to get an Infiniband hardware address (in 6 byte Ethernet format)
- # for the interface.
+
+ # Pretend that an Infiniband GUID is an ethernet address for Openstack
+ # configuration purposes
+ # TODO: move this format to openstack
ib_mac = get_ib_interface_hwaddr(name, True)
if ib_mac:
- if ib_mac in ret:
- raise RuntimeError(
- "duplicate mac found! both '%s' and '%s' have mac '%s'"
- % (name, ret[ib_mac], ib_mac)
+
+ # If an Ethernet mac address happens to collide with a few bits in
+ # an IB GUID, prefer the ethernet address.
+ #
+ # Log a message in case a user is troubleshooting openstack, but
+ # don't fall over, since this really isn't _a_ problem: if
+ # openstack makes weird assumptions that cause it to fail, it's
+ # really not _our_ problem.
+ #
+ # These few bits selected in get_ib_interface_hwaddr() are not
+ # guaranteed to be globally unique in InfiniBand, and really make
+ # no sense to compare them to Ethernet mac addresses. This appears
+ # to be a workaround for openstack-specific behavior[1]; for now
+ # it is left in place to avoid breaking openstack, but it should
+ # be removed from get_interfaces_by_mac_on_linux()
+ # because IB GUIDs are not mac addresses, and operate on a separate
+ # L2 protocol so address collision doesn't matter.
+ #
+ # [1] sources/helpers/openstack.py:convert_net_json() expects
+ # net.get_interfaces_by_mac() to return IB addresses in this format
+ if ib_mac not in ret:
+ ret[ib_mac] = name
+ else:
+ LOG.warning(
+ "Ethernet and InfiniBand interfaces have the same address"
+ " both '%s' and '%s' have address '%s'.",
+ name,
+ ret[ib_mac],
+ ib_mac,
)
- ret[ib_mac] = name
return ret
@@ -1037,12 +1119,16 @@ def has_url_connectivity(url_data: Dict[str, Any]) -> bool:
)
return False
url = url_data["url"]
- if not any([url.startswith("http://"), url.startswith("https://")]):
- LOG.warning(
- "Ignoring connectivity check. Expected URL beginning with http*://"
- " received '%s'",
- url,
- )
+ try:
+ result = urlparse(url)
+ if not any([result.scheme == "http", result.scheme == "https"]):
+ LOG.warning(
+ "Ignoring connectivity check. Invalid URL scheme %s",
+ result.scheme,
+ )
+ return False
+ except ValueError as err:
+ LOG.warning("Ignoring connectivity check. Invalid URL %s", err)
return False
if "timeout" not in url_data:
url_data["timeout"] = 5
@@ -1053,36 +1139,169 @@ def has_url_connectivity(url_data: Dict[str, Any]) -> bool:
return True
-def is_ip_address(s: str) -> bool:
- """Returns a bool indicating if ``s`` is an IP address.
+def network_validator(check_cb: Callable, address: str, **kwargs) -> bool:
+ """Use a function to determine whether address meets criteria.
- :param s:
+ :param check_cb:
+ Test function; should return a truthy value on success and raise
+ ValueError for invalid input
+ :param address:
The string to test.
:return:
- A bool indicating if the string contains an IP address or not.
+ A bool indicating if the string passed the test.
+
"""
try:
- ipaddress.ip_address(s)
+ return bool(check_cb(address, **kwargs))
except ValueError:
return False
- return True
-def is_ipv4_address(s: str) -> bool:
+def is_ip_address(address: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IP address.
+
+ :param address:
+ The string to test.
+
+ :return:
+ A bool indicating if the string is an IP address or not.
+ """
+ return network_validator(ipaddress.ip_address, address)
+
+
+def is_ipv4_address(address: str) -> bool:
"""Returns a bool indicating if ``s`` is an IPv4 address.
- :param s:
+ :param address:
+ The string to test.
+
+ :return:
+ A bool indicating if the string is an IPv4 address or not.
+ """
+ return network_validator(ipaddress.IPv4Address, address)
+
+
+def is_ipv6_address(address: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IPv6 address.
+
+ :param address:
The string to test.
:return:
- A bool indicating if the string contains an IPv4 address or not.
+ A bool indicating if the string is an IPv6 address or not.
+ """
+ return network_validator(ipaddress.IPv6Address, address)
+
+
+def is_ip_network(address: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IPv4 or IPv6 network.
+
+ :param address:
+ The string to test.
+
+ :return:
+ A bool indicating if the string is an IPv4 or IPv6 network or not.
+ """
+ return network_validator(ipaddress.ip_network, address, strict=False)
+
+
+def is_ipv4_network(address: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IPv4 network.
+
+ :param address:
+ The string to test.
+
+ :return:
+ A bool indicating if the string is an IPv4 network or not.
+ """
+ return network_validator(ipaddress.IPv4Network, address, strict=False)
+
+
+def is_ipv6_network(address: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IPv6 network.
+
+ :param address:
+ The string to test.
+
+ :return:
+ A bool indicating if the string is an IPv6 network or not.
+ """
+ return network_validator(ipaddress.IPv6Network, address, strict=False)
+
+
+def subnet_is_ipv6(subnet) -> bool:
+ """Common helper for checking network_state subnets for ipv6."""
+ # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or
+ # 'ipv6_slaac'
+ if subnet["type"].endswith("6") or subnet["type"] in IPV6_DYNAMIC_TYPES:
+ # This is a request for either a static6 type or DHCPv6.
+ return True
+ elif subnet["type"] == "static" and is_ipv6_address(subnet.get("address")):
+ return True
+ return False
+
+
+def net_prefix_to_ipv4_mask(prefix) -> str:
+ """Convert a network prefix to an ipv4 netmask.
+
+ This is the inverse of ipv4_mask_to_net_prefix.
+ 24 -> "255.255.255.0"
+ Also supports input as a string."""
+ return str(ipaddress.IPv4Network(f"0.0.0.0/{prefix}").netmask)
+
+
+def ipv4_mask_to_net_prefix(mask) -> int:
+ """Convert an ipv4 netmask into a network prefix length.
+
+ If the input is already an integer or a string representation of
+ an integer, then int(mask) will be returned.
+ "255.255.255.0" => 24
+ str(24) => 24
+ "24" => 24
+ """
+ return ipaddress.ip_network(f"0.0.0.0/{mask}").prefixlen
+
+
+def ipv6_mask_to_net_prefix(mask) -> int:
+ """Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix.
+
+ If the input is already an integer or a string representation of
+ an integer, then int(mask) will be returned.
+ "ffff:ffff:ffff::" => 48
+ "48" => 48
"""
try:
- ipaddress.IPv4Address(s)
+ # In the case the mask is already a prefix
+ prefixlen = ipaddress.ip_network(f"::/{mask}").prefixlen
+ return prefixlen
except ValueError:
- return False
- return True
+ # ValueError means mask is an IPv6 address representation and need
+ # conversion.
+ pass
+
+ netmask = ipaddress.ip_address(mask)
+ mask_int = int(netmask)
+ # If the mask is all zeroes, just return it
+ if mask_int == 0:
+ return mask_int
+
+ trailing_zeroes = min(
+ ipaddress.IPV6LENGTH, (~mask_int & (mask_int - 1)).bit_length()
+ )
+ leading_ones = mask_int >> trailing_zeroes
+ prefixlen = ipaddress.IPV6LENGTH - trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ raise ValueError("Invalid network mask '%s'" % mask)
+
+ return prefixlen
+
+
+def mask_and_ipv4_to_bcast_addr(mask: str, ip: str) -> str:
+ """Get string representation of broadcast address from an ip/mask pair"""
+ return str(
+ ipaddress.IPv4Network(f"{ip}/{mask}", strict=False).broadcast_address
+ )
class EphemeralIPv4Network(object):
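Worked examples showing that each relocated helper above reduces to the stdlib ipaddress machinery it now wraps:

    # Worked examples for the relocated mask/prefix helpers.
    import ipaddress

    # net_prefix_to_ipv4_mask: 24 -> "255.255.255.0"
    assert str(ipaddress.IPv4Network("0.0.0.0/24").netmask) == "255.255.255.0"

    # ipv4_mask_to_net_prefix: "255.255.255.0" -> 24 (also accepts "24")
    assert ipaddress.ip_network("0.0.0.0/255.255.255.0").prefixlen == 24

    # ipv6_mask_to_net_prefix: "ffff:ffff:ffff::" -> 48, via the
    # trailing-zeroes bit trick used in the function body above.
    mask_int = int(ipaddress.ip_address("ffff:ffff:ffff::"))
    trailing = (~mask_int & (mask_int - 1)).bit_length()
    assert ipaddress.IPV6LENGTH - trailing == 48

    # mask_and_ipv4_to_bcast_addr("255.255.255.0", "192.168.1.10")
    net = ipaddress.IPv4Network("192.168.1.10/255.255.255.0", strict=False)
    assert str(net.broadcast_address) == "192.168.1.255"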
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index e80c26df..f2cc078f 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -1,15 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
import logging
-import os
from abc import ABC, abstractmethod
from typing import Iterable, List, Type
from cloudinit import subp, util
from cloudinit.net.eni import available as eni_available
from cloudinit.net.netplan import available as netplan_available
+from cloudinit.net.network_manager import available as nm_available
from cloudinit.net.network_state import NetworkState
from cloudinit.net.networkd import available as networkd_available
-from cloudinit.net.sysconfig import NM_CFG_FILE
LOG = logging.getLogger(__name__)
@@ -124,20 +123,24 @@ class IfUpDownActivator(NetworkActivator):
class NetworkManagerActivator(NetworkActivator):
@staticmethod
def available(target=None) -> bool:
- """Return true if network manager can be used on this system."""
- config_present = os.path.isfile(
- subp.target_path(target, path=NM_CFG_FILE)
- )
- nmcli_present = subp.which("nmcli", target=target)
- return config_present and bool(nmcli_present)
+ """Return true if NetworkManager can be used on this system."""
+ return nm_available(target=target)
@staticmethod
def bring_up_interface(device_name: str) -> bool:
- """Bring up interface using nmcli.
+ """Bring up connection using nmcli.
Return True if successful, otherwise return False
"""
- cmd = ["nmcli", "connection", "up", "ifname", device_name]
+ from cloudinit.net.network_manager import conn_filename
+
+ filename = conn_filename(device_name)
+ cmd = ["nmcli", "connection", "load", filename]
+ if _alter_interface(cmd, device_name):
+ cmd = ["nmcli", "connection", "up", "filename", filename]
+ else:
+ _alter_interface(["nmcli", "connection", "reload"], device_name)
+ cmd = ["nmcli", "connection", "up", "ifname", device_name]
return _alter_interface(cmd, device_name)
@staticmethod
@@ -146,7 +149,7 @@ class NetworkManagerActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- cmd = ["nmcli", "connection", "down", device_name]
+ cmd = ["nmcli", "device", "disconnect", device_name]
return _alter_interface(cmd, device_name)
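A hedged sketch of the nmcli command sequence the revised bring-up above runs, with subprocess standing in for the module's _alter_interface helper:

    # Hedged sketch of the nmcli sequence used to bring a device up.
    import subprocess

    def bring_up(device_name: str, filename: str) -> bool:
        def run(cmd) -> bool:  # stand-in for _alter_interface
            return subprocess.run(cmd).returncode == 0

        if run(["nmcli", "connection", "load", filename]):
            # Keyfile loaded: activate the connection from that exact file.
            return run(["nmcli", "connection", "up", "filename", filename])
        # Fall back to a reload plus activation by interface name.
        run(["nmcli", "connection", "reload"])
        return run(["nmcli", "connection", "up", "ifname", device_name])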
@@ -252,8 +255,8 @@ class NetworkdActivator(NetworkActivator):
# version to encompass both seems overkill at this point
DEFAULT_PRIORITY = [
IfUpDownActivator,
- NetworkManagerActivator,
NetplanActivator,
+ NetworkManagerActivator,
NetworkdActivator,
]
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index eab86d9f..eab86d9f 100755..100644
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index f9af18cf..53f8c686 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -20,15 +20,19 @@ from cloudinit.net import (
find_fallback_nic,
get_devicelist,
has_url_connectivity,
+ mask_and_ipv4_to_bcast_addr,
)
-from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
LOG = logging.getLogger(__name__)
NETWORKD_LEASES_DIR = "/run/systemd/netif/leases"
-class InvalidDHCPLeaseFileError(Exception):
+class NoDHCPLeaseError(Exception):
+ """Raised when unable to get a DHCP lease."""
+
+
+class InvalidDHCPLeaseFileError(NoDHCPLeaseError):
"""Raised when parsing an empty or invalid dhcp.leases file.
Current uses are DataSourceAzure and DataSourceEc2 during ephemeral
@@ -36,8 +40,12 @@ class InvalidDHCPLeaseFileError(Exception):
"""
-class NoDHCPLeaseError(Exception):
- """Raised when unable to get a DHCP lease."""
+class NoDHCPLeaseInterfaceError(NoDHCPLeaseError):
+ """Raised when unable to find a viable interface for DHCP."""
+
+
+class NoDHCPLeaseMissingDhclientError(NoDHCPLeaseError):
+ """Raised when unable to find dhclient."""
class EphemeralDHCPv4(object):
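With InvalidDHCPLeaseFileError re-parented under NoDHCPLeaseError, a single except clause now covers every DHCP failure mode, including the two new interface and dhclient errors; a self-contained sketch:

    # Sketch: one except clause now covers every DHCP failure mode.
    class NoDHCPLeaseError(Exception):
        """Raised when unable to get a DHCP lease."""

    class InvalidDHCPLeaseFileError(NoDHCPLeaseError):
        """Raised when parsing an empty or invalid dhcp.leases file."""

    class NoDHCPLeaseInterfaceError(NoDHCPLeaseError):
        """Raised when unable to find a viable interface for DHCP."""

    class NoDHCPLeaseMissingDhclientError(NoDHCPLeaseError):
        """Raised when unable to find dhclient."""

    def obtain_lease():
        raise NoDHCPLeaseInterfaceError()

    try:
        obtain_lease()
    except NoDHCPLeaseError as e:
        # Catches the base error and all three subclasses uniformly.
        print("no lease:", type(e).__name__)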
@@ -89,12 +97,7 @@ class EphemeralDHCPv4(object):
"""
if self.lease:
return self.lease
- try:
- leases = maybe_perform_dhcp_discovery(
- self.iface, self.dhcp_log_func
- )
- except InvalidDHCPLeaseFileError as e:
- raise NoDHCPLeaseError() from e
+ leases = maybe_perform_dhcp_discovery(self.iface, self.dhcp_log_func)
if not leases:
raise NoDHCPLeaseError()
self.lease = leases[-1]
@@ -117,7 +120,9 @@ class EphemeralDHCPv4(object):
}
kwargs = self.extract_dhcp_options_mapping(nmap)
if not kwargs["broadcast"]:
- kwargs["broadcast"] = bcip(kwargs["prefix_or_mask"], kwargs["ip"])
+ kwargs["broadcast"] = mask_and_ipv4_to_bcast_addr(
+ kwargs["prefix_or_mask"], kwargs["ip"]
+ )
if kwargs["static_routes"]:
kwargs["static_routes"] = parse_static_routes(
kwargs["static_routes"]
@@ -165,16 +170,16 @@ def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
nic = find_fallback_nic()
if nic is None:
LOG.debug("Skip dhcp_discovery: Unable to find fallback nic.")
- return []
+ raise NoDHCPLeaseInterfaceError()
elif nic not in get_devicelist():
LOG.debug(
"Skip dhcp_discovery: nic %s not found in get_devicelist.", nic
)
- return []
+ raise NoDHCPLeaseInterfaceError()
dhclient_path = subp.which("dhclient")
if not dhclient_path:
LOG.debug("Skip dhclient configuration: No dhclient command found.")
- return []
+ raise NoDHCPLeaseMissingDhclientError()
with temp_utils.tempdir(
rmtree_ignore_errors=True, prefix="cloud-init-dhcp-", needs_exe=True
) as tdir:
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 99e3fbb0..b0ec67bd 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -7,9 +7,9 @@ import re
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.net import subnet_is_ipv6
from . import ParserError, renderer
-from .network_state import subnet_is_ipv6
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 57ba2d9a..2af0ee9b 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -5,16 +5,16 @@ import os
from cloudinit import log as logging
from cloudinit import safeyaml, subp, util
-from cloudinit.net import SYS_CLASS_NET, get_devicelist
-
-from . import renderer
-from .network_state import (
+from cloudinit.net import (
IPV6_DYNAMIC_TYPES,
- NET_CONFIG_TO_V2,
- NetworkState,
+ SYS_CLASS_NET,
+ get_devicelist,
subnet_is_ipv6,
)
+from . import renderer
+from .network_state import NET_CONFIG_TO_V2, NetworkState
+
KNOWN_SNAPD_CONFIG = b"""\
# This is the initial network config.
# It can be overwritten by cloud-init or console-conf.
diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py
new file mode 100644
index 00000000..8fd15575
--- /dev/null
+++ b/cloudinit/net/network_manager.py
@@ -0,0 +1,390 @@
+# Copyright 2022 Red Hat, Inc.
+#
+# Author: Lubomir Rintel <lkundrak@v3.sk>
+# Fixes and suggestions contributed by James Falcon, Neal Gompa,
+# Zbigniew Jędrzejewski-Szmek and Emanuele Giuseppe Esposito.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import configparser
+import io
+import itertools
+import os
+import uuid
+
+from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.net import is_ipv6_address, subnet_is_ipv6
+
+from . import renderer
+
+NM_RUN_DIR = "/etc/NetworkManager"
+NM_LIB_DIR = "/usr/lib/NetworkManager"
+NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
+LOG = logging.getLogger(__name__)
+
+
+class NMConnection:
+ """Represents a NetworkManager connection profile."""
+
+ def __init__(self, con_id):
+ """
+ Initializes the connection with some very basic properties,
+ notably the UUID so that the connection can be referred to.
+ """
+
+ # Chosen by fair dice roll
+ CI_NM_UUID = uuid.UUID("a3924cb8-09e0-43e9-890b-77972a800108")
+
+ self.config = configparser.ConfigParser()
+ # Identity option name mapping, to achieve case sensitivity
+ self.config.optionxform = str
+
+ self.config["connection"] = {
+ "id": f"cloud-init {con_id}",
+ "uuid": str(uuid.uuid5(CI_NM_UUID, con_id)),
+ }
+
+ # This is not actually used anywhere, but may be useful in future
+ self.config["user"] = {
+ "org.freedesktop.NetworkManager.origin": "cloud-init"
+ }
+
+ def _set_default(self, section, option, value):
+ """
+ Sets a property unless it's already set, ensuring the section
+ exists.
+ """
+
+ if not self.config.has_section(section):
+ self.config[section] = {}
+ if not self.config.has_option(section, option):
+ self.config[section][option] = value
+
+ def _set_ip_method(self, family, subnet_type):
+ """
+ Ensures there's appropriate [ipv4]/[ipv6] for given family
+ appropriate for given configuration type
+ """
+
+ method_map = {
+ "static": "manual",
+ "dhcp6": "dhcp",
+ "ipv6_slaac": "auto",
+ "ipv6_dhcpv6-stateless": "auto",
+ "ipv6_dhcpv6-stateful": "auto",
+ "dhcp4": "auto",
+ "dhcp": "auto",
+ }
+
+ # Ensure we got an [ipvX] section
+ self._set_default(family, "method", "disabled")
+
+ try:
+ method = method_map[subnet_type]
+ except KeyError:
+ # What else can we do
+ method = "auto"
+ self.config[family]["may-fail"] = "true"
+
+ # Make sure we don't "downgrade" the method in case
+ # we got conflicting subnets (e.g. static along with dhcp)
+ if self.config[family]["method"] == "dhcp":
+ return
+ if self.config[family]["method"] == "auto" and method == "manual":
+ return
+
+ self.config[family]["method"] = method
+ self._set_default(family, "may-fail", "false")
+ if family == "ipv6":
+ self._set_default(family, "addr-gen-mode", "stable-privacy")
+
+ def _add_numbered(self, section, key_prefix, value):
+ """
+ Adds a numbered property, such as address<n> or route<n>, ensuring
+ the appropriate value gets used for <n>.
+ """
+
+ for index in itertools.count(1):
+ key = f"{key_prefix}{index}"
+ if not self.config.has_option(section, key):
+ self.config[section][key] = value
+ break
+
+ def _add_address(self, family, subnet):
+ """
+ Adds an ipv[46]address<n> property.
+ """
+
+ value = subnet["address"] + "/" + str(subnet["prefix"])
+ self._add_numbered(family, "address", value)
+
+ def _add_route(self, family, route):
+ """
+ Adds a ipv[46].route<n> property.
+ """
+
+ value = route["network"] + "/" + str(route["prefix"])
+ if "gateway" in route:
+ value = value + "," + route["gateway"]
+ self._add_numbered(family, "route", value)
+
+ def _add_nameserver(self, dns):
+ """
+ Extends the ipv[46].dns property with a name server.
+ """
+
+ # FIXME: the subnet may contain IPv4 and IPv6 name servers mixed
+ # together. We might be getting an IPv6 name server while
+ # we're dealing with an IPv4 subnet. Sort this out by figuring
+ # out the correct family and making sure a valid section exists.
+ family = "ipv6" if is_ipv6_address(dns) else "ipv4"
+ self._set_default(family, "method", "disabled")
+
+ self._set_default(family, "dns", "")
+ self.config[family]["dns"] = self.config[family]["dns"] + dns + ";"
+
+ def _add_dns_search(self, family, dns_search):
+ """
+ Extends the ipv[46].dns-search property with a name server.
+ """
+
+ self._set_default(family, "dns-search", "")
+ self.config[family]["dns-search"] = (
+ self.config[family]["dns-search"] + ";".join(dns_search) + ";"
+ )
+
+ def con_uuid(self):
+ """
+ Returns the connection UUID
+ """
+ return self.config["connection"]["uuid"]
+
+ def valid(self):
+ """
+ Can this be serialized into a meaningful connection profile?
+ """
+ return self.config.has_option("connection", "type")
+
+ @staticmethod
+ def mac_addr(addr):
+ """
+ Sanitize a MAC address.
+ """
+ return addr.replace("-", ":").upper()
+
+ def render_interface(self, iface, renderer):
+ """
+ Integrate information from network state interface information
+ into the connection. Most of the work is done here.
+ """
+
+ # Initialize type & connectivity
+ _type_map = {
+ "physical": "ethernet",
+ "vlan": "vlan",
+ "bond": "bond",
+ "bridge": "bridge",
+ "infiniband": "infiniband",
+ "loopback": None,
+ }
+
+ if_type = _type_map[iface["type"]]
+ if if_type is None:
+ return
+ if "bond-master" in iface:
+ slave_type = "bond"
+ else:
+ slave_type = None
+
+ self.config["connection"]["type"] = if_type
+ if slave_type is not None:
+ self.config["connection"]["slave-type"] = slave_type
+ self.config["connection"]["master"] = renderer.con_ref(
+ iface[slave_type + "-master"]
+ )
+
+ # Add type specific-section
+ self.config[if_type] = {}
+
+ # These are the interface properties that map nicely
+ # to NetworkManager properties
+ _prop_map = {
+ "bond": {
+ "mode": "bond-mode",
+ "miimon": "bond_miimon",
+ "xmit_hash_policy": "bond-xmit-hash-policy",
+ "num_grat_arp": "bond-num-grat-arp",
+ "downdelay": "bond-downdelay",
+ "updelay": "bond-updelay",
+ "fail_over_mac": "bond-fail-over-mac",
+ "primary_reselect": "bond-primary-reselect",
+ "primary": "bond-primary",
+ },
+ "bridge": {
+ "stp": "bridge_stp",
+ "priority": "bridge_bridgeprio",
+ },
+ "vlan": {
+ "id": "vlan_id",
+ },
+ "ethernet": {},
+ "infiniband": {},
+ }
+
+ device_mtu = iface["mtu"]
+ ipv4_mtu = None
+
+ # Deal with Layer 3 configuration
+ for subnet in iface["subnets"]:
+ family = "ipv6" if subnet_is_ipv6(subnet) else "ipv4"
+
+ self._set_ip_method(family, subnet["type"])
+ if "address" in subnet:
+ self._add_address(family, subnet)
+ if "gateway" in subnet:
+ self.config[family]["gateway"] = subnet["gateway"]
+ for route in subnet["routes"]:
+ self._add_route(family, route)
+ if "dns_nameservers" in subnet:
+ for nameserver in subnet["dns_nameservers"]:
+ self._add_nameserver(nameserver)
+ if "dns_search" in subnet:
+ self._add_dns_search(family, subnet["dns_search"])
+ if family == "ipv4" and "mtu" in subnet:
+ ipv4_mtu = subnet["mtu"]
+
+ if ipv4_mtu is None:
+ ipv4_mtu = device_mtu
+ if ipv4_mtu != device_mtu:
+ LOG.warning(
+ "Network config: ignoring %s device-level mtu:%s"
+ " because ipv4 subnet-level mtu:%s provided.",
+ iface["name"],
+ device_mtu,
+ ipv4_mtu,
+ )
+
+ # Parse type-specific properties
+ for nm_prop, key in _prop_map[if_type].items():
+ if key not in iface:
+ continue
+ if iface[key] is None:
+ continue
+ if isinstance(iface[key], bool):
+ self.config[if_type][nm_prop] = (
+ "true" if iface[key] else "false"
+ )
+ else:
+ self.config[if_type][nm_prop] = str(iface[key])
+
+ # These ones need special treatment
+ if if_type == "ethernet":
+ if iface["wakeonlan"] is True:
+ # NM_SETTING_WIRED_WAKE_ON_LAN_MAGIC
+ self.config["ethernet"]["wake-on-lan"] = str(0x40)
+ if ipv4_mtu is not None:
+ self.config["ethernet"]["mtu"] = str(ipv4_mtu)
+ if iface["mac_address"] is not None:
+ self.config["ethernet"]["mac-address"] = self.mac_addr(
+ iface["mac_address"]
+ )
+ if if_type == "vlan" and "vlan-raw-device" in iface:
+ self.config["vlan"]["parent"] = renderer.con_ref(
+ iface["vlan-raw-device"]
+ )
+ if if_type == "bridge":
+ # Bridge is ass-backwards compared to bond
+ for port in iface["bridge_ports"]:
+ port = renderer.get_conn(port)
+ port._set_default("connection", "slave-type", "bridge")
+ port._set_default("connection", "master", self.con_uuid())
+ if iface["mac_address"] is not None:
+ self.config["bridge"]["mac-address"] = self.mac_addr(
+ iface["mac_address"]
+ )
+ if if_type == "infiniband" and ipv4_mtu is not None:
+ self.config["infiniband"]["transport-mode"] = "datagram"
+ self.config["infiniband"]["mtu"] = str(ipv4_mtu)
+ if iface["mac_address"] is not None:
+ self.config["infiniband"]["mac-address"] = self.mac_addr(
+ iface["mac_address"]
+ )
+
+ # Finish up
+ if if_type == "bridge" or not self.config.has_option(
+ if_type, "mac-address"
+ ):
+ self.config["connection"]["interface-name"] = iface["name"]
+
+ def dump(self):
+ """
+ Stringify.
+ """
+
+ buf = io.StringIO()
+ self.config.write(buf, space_around_delimiters=False)
+ header = "# Generated by cloud-init. Changes will be lost.\n\n"
+ return header + buf.getvalue()
+
+
+class Renderer(renderer.Renderer):
+ """Renders network information in a NetworkManager keyfile format."""
+
+ def __init__(self, config=None):
+ self.connections = {}
+
+ def get_conn(self, con_id):
+ return self.connections[con_id]
+
+ def con_ref(self, con_id):
+ if con_id in self.connections:
+ return self.connections[con_id].con_uuid()
+ else:
+ # Well, what can we do...
+ return con_id
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ # First pass makes sure there's NMConnections for all known
+ # interfaces that have UUIDs that can be linked to from related
+ # interfaces
+ for iface in network_state.iter_interfaces():
+ self.connections[iface["name"]] = NMConnection(iface["name"])
+
+ # Now render the actual interface configuration
+ for iface in network_state.iter_interfaces():
+ conn = self.connections[iface["name"]]
+ conn.render_interface(iface, self)
+
+ # And finally write the files
+ for con_id, conn in self.connections.items():
+ if not conn.valid():
+ continue
+ name = conn_filename(con_id, target)
+ util.write_file(name, conn.dump(), 0o600)
+
+
+def conn_filename(con_id, target=None):
+ target_con_dir = subp.target_path(target, NM_RUN_DIR)
+ con_file = f"cloud-init-{con_id}.nmconnection"
+ return f"{target_con_dir}/system-connections/{con_file}"
+
+
+def available(target=None):
+ # TODO: Move `uses_systemd` to a more appropriate location
+ # It is imported here to avoid circular import
+ from cloudinit.distros import uses_systemd
+
+ config_present = os.path.isfile(subp.target_path(target, path=NM_CFG_FILE))
+ nmcli_present = subp.which("nmcli", target=target)
+ service_active = True
+ if uses_systemd():
+ try:
+ subp.subp(["systemctl", "is-enabled", "NetworkManager.service"])
+ except subp.ProcessExecutionError:
+ service_active = False
+
+ return config_present and bool(nmcli_present) and service_active
+
+
+# vi: ts=4 expandtab
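A note on the UUID scheme above: connection UUIDs are uuid5 digests of the interface name under a fixed namespace, so re-rendering the same config always yields the same UUID, and conn_filename() always yields the same /etc/NetworkManager/system-connections/cloud-init-<name>.nmconnection path:

    # Connection UUIDs are derived deterministically from the interface name.
    import uuid

    CI_NM_UUID = uuid.UUID("a3924cb8-09e0-43e9-890b-77972a800108")

    # Stable across runs and reboots: same name, same UUID.
    assert uuid.uuid5(CI_NM_UUID, "eth0") == uuid.uuid5(CI_NM_UUID, "eth0")
    print(uuid.uuid5(CI_NM_UUID, "eth0"))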
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 7bac8adf..3c7ee5a3 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -6,22 +6,23 @@
import copy
import functools
-import ipaddress
import logging
-import socket
-import struct
from cloudinit import safeyaml, util
+from cloudinit.net import (
+ get_interfaces_by_mac,
+ ipv4_mask_to_net_prefix,
+ ipv6_mask_to_net_prefix,
+ is_ip_network,
+ is_ipv4_network,
+ is_ipv6_address,
+ is_ipv6_network,
+ net_prefix_to_ipv4_mask,
+)
LOG = logging.getLogger(__name__)
NETWORK_STATE_VERSION = 1
-IPV6_DYNAMIC_TYPES = [
- "dhcp6",
- "ipv6_slaac",
- "ipv6_dhcpv6-stateless",
- "ipv6_dhcpv6-stateful",
-]
NETWORK_STATE_REQUIRED_KEYS = {
1: ["version", "config", "network_state"],
}
@@ -363,7 +364,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
# automatically set 'use_ipv6' if any addresses are ipv6
if not self.use_ipv6:
for subnet in subnets:
- if subnet.get("type").endswith("6") or is_ipv6_addr(
+ if subnet.get("type").endswith("6") or is_ipv6_address(
subnet.get("address")
):
self.use_ipv6 = True
@@ -668,10 +669,18 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
]
}
"""
+
+ # Get the interfaces by MAC address to update an interface's
+ # device name to the name of the device that matches a provided
+ # MAC address when the set-name directive is not present.
+ #
+ # Please see https://bugs.launchpad.net/cloud-init/+bug/1855945
+ # for more information.
+ ifaces_by_mac = get_interfaces_by_mac()
+
for eth, cfg in command.items():
phy_cmd = {
"type": "physical",
- "name": cfg.get("set-name", eth),
}
match = cfg.get("match", {})
mac_address = match.get("macaddress", None)
@@ -683,6 +692,24 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
str(cfg),
)
phy_cmd["mac_address"] = mac_address
+
+ # Determine the name of the interface by using one of the
+ # following in the order they are listed:
+ # * set-name
+ # * interface name looked up by mac
+ # * value of "eth" key from this loop
+ name = eth
+ set_name = cfg.get("set-name", None)
+ if set_name:
+ name = set_name
+ elif mac_address and ifaces_by_mac:
+ lcase_mac_address = mac_address.lower()
+ for iface_mac, iface_name in ifaces_by_mac.items():
+ if lcase_mac_address == iface_mac.lower():
+ name = iface_name
+ break
+ phy_cmd["name"] = name
+
driver = match.get("driver", None)
if driver:
phy_cmd["params"] = {"driver": driver}
@@ -920,27 +947,35 @@ def _normalize_net_keys(network, address_keys=()):
LOG.error(message)
raise ValueError(message)
- addr = net.get(addr_key)
- ipv6 = is_ipv6_addr(addr)
+ addr = str(net.get(addr_key))
+ if not is_ip_network(addr):
+ LOG.error("Address %s is not a valid ip network", addr)
+ raise ValueError(f"Address {addr} is not a valid ip address")
+
+ ipv6 = is_ipv6_network(addr)
+ ipv4 = is_ipv4_network(addr)
+
netmask = net.get("netmask")
if "/" in addr:
addr_part, _, maybe_prefix = addr.partition("/")
net[addr_key] = addr_part
- try:
- prefix = int(maybe_prefix)
- except ValueError:
- if ipv6:
- # this supports input of ffff:ffff:ffff::
- prefix = ipv6_mask_to_net_prefix(maybe_prefix)
- else:
- # this supports input of 255.255.255.0
- prefix = ipv4_mask_to_net_prefix(maybe_prefix)
- elif netmask and not ipv6:
+ if ipv6:
+ # this supports input of ffff:ffff:ffff::
+ prefix = ipv6_mask_to_net_prefix(maybe_prefix)
+ elif ipv4:
+ # this supports input of 255.255.255.0
+ prefix = ipv4_mask_to_net_prefix(maybe_prefix)
+ else:
+ # In theory this never happens: is_ip_network() should catch
+ # all invalid networks
+ LOG.error("Address %s is not a valid ip network", addr)
+ raise ValueError(f"Address {addr} is not a valid ip network")
+ elif "prefix" in net:
+ prefix = int(net["prefix"])
+ elif netmask and ipv4:
prefix = ipv4_mask_to_net_prefix(netmask)
elif netmask and ipv6:
prefix = ipv6_mask_to_net_prefix(netmask)
- elif "prefix" in net:
- prefix = int(net["prefix"])
else:
prefix = 64 if ipv6 else 24
@@ -957,7 +992,7 @@ def _normalize_net_keys(network, address_keys=()):
# 'netmask' for ipv6. We need a 'net_prefix_to_ipv6_mask' for that.
if "netmask" in net:
del net["netmask"]
- else:
+ elif ipv4:
net["netmask"] = net_prefix_to_ipv4_mask(net["prefix"])
return net
@@ -1003,96 +1038,6 @@ def _normalize_subnets(subnets):
return [_normalize_subnet(s) for s in subnets]
-def is_ipv6_addr(address):
- if not address:
- return False
- return ":" in str(address)
-
-
-def subnet_is_ipv6(subnet):
- """Common helper for checking network_state subnets for ipv6."""
- # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or
- # 'ipv6_slaac'
- if subnet["type"].endswith("6") or subnet["type"] in IPV6_DYNAMIC_TYPES:
- # This is a request either static6 type or DHCPv6.
- return True
- elif subnet["type"] == "static" and is_ipv6_addr(subnet.get("address")):
- return True
- return False
-
-
-def net_prefix_to_ipv4_mask(prefix):
- """Convert a network prefix to an ipv4 netmask.
-
- This is the inverse of ipv4_mask_to_net_prefix.
- 24 -> "255.255.255.0"
- Also supports input as a string."""
- mask = socket.inet_ntoa(
- struct.pack(">I", (0xFFFFFFFF << (32 - int(prefix)) & 0xFFFFFFFF))
- )
- return mask
-
-
-def ipv4_mask_to_net_prefix(mask):
- """Convert an ipv4 netmask into a network prefix length.
-
- If the input is already an integer or a string representation of
- an integer, then int(mask) will be returned.
- "255.255.255.0" => 24
- str(24) => 24
- "24" => 24
- """
- return ipaddress.ip_network(f"0.0.0.0/{mask}").prefixlen
-
-
-def ipv6_mask_to_net_prefix(mask):
- """Convert an ipv6 netmask (very uncommon) or prefix (64) to prefix.
-
- If the input is already an integer or a string representation of
- an integer, then int(mask) will be returned.
- "ffff:ffff:ffff::" => 48
- "48" => 48
- """
- try:
- # In the case the mask is already a prefix
- prefixlen = ipaddress.ip_network(f"::/{mask}").prefixlen
- return prefixlen
- except ValueError:
- # ValueError means mask is an IPv6 address representation and need
- # conversion.
- pass
-
- netmask = ipaddress.ip_address(mask)
- mask_int = int(netmask)
- # If the mask is all zeroes, just return it
- if mask_int == 0:
- return mask_int
-
- trailing_zeroes = min(
- ipaddress.IPV6LENGTH, (~mask_int & (mask_int - 1)).bit_length()
- )
- leading_ones = mask_int >> trailing_zeroes
- prefixlen = ipaddress.IPV6LENGTH - trailing_zeroes
- all_ones = (1 << prefixlen) - 1
- if leading_ones != all_ones:
- raise ValueError("Invalid network mask '%s'" % mask)
-
- return prefixlen
-
-
-def mask_and_ipv4_to_bcast_addr(mask, ip):
- """Calculate the broadcast address from the subnet mask and ip addr.
-
- Supports ipv4 only."""
- ip_bin = int("".join([bin(int(x) + 256)[3:] for x in ip.split(".")]), 2)
- mask_dec = ipv4_mask_to_net_prefix(mask)
- bcast_bin = ip_bin | (2 ** (32 - mask_dec) - 1)
- bcast_str = ".".join(
- [str(bcast_bin >> (i << 3) & 0xFF) for i in range(4)[::-1]]
- )
- return bcast_str
-
-
def parse_net_config_data(net_config, skip_broken=True) -> NetworkState:
"""Parses the config, returns NetworkState object
@@ -1114,7 +1059,8 @@ def parse_net_config_data(net_config, skip_broken=True) -> NetworkState:
if not state:
raise RuntimeError(
"No valid network_state object created from network config. "
- "Did you specify the correct version?"
+ "Did you specify the correct version? Network config:\n"
+ f"{net_config}"
)
return state
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 34b74b80..da154731 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -8,7 +8,7 @@
import abc
import io
-from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.net.network_state import NetworkState, parse_net_config_data
from cloudinit.net.udev import generate_udev_rule
@@ -32,7 +32,7 @@ class Renderer(object):
pass
@staticmethod
- def _render_persistent_net(network_state):
+ def _render_persistent_net(network_state: NetworkState):
"""Given state, emit udev rules to map mac to ifname."""
# TODO(harlowja): this seems shared between eni renderer and
# this, so move it to a shared location.
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index c755f04c..7edc34b5 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -8,6 +8,7 @@ from . import (
freebsd,
netbsd,
netplan,
+ network_manager,
networkd,
openbsd,
renderer,
@@ -19,6 +20,7 @@ NAME_TO_RENDERER = {
"freebsd": freebsd,
"netbsd": netbsd,
"netplan": netplan,
+ "network-manager": network_manager,
"networkd": networkd,
"openbsd": openbsd,
"sysconfig": sysconfig,
@@ -28,6 +30,7 @@ DEFAULT_PRIORITY = [
"eni",
"sysconfig",
"netplan",
+ "network-manager",
"freebsd",
"netbsd",
"openbsd",
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index ba85c4f6..37c5d260 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -4,21 +4,20 @@ import copy
import io
import os
import re
-
-from configobj import ConfigObj
+from typing import Mapping
from cloudinit import log as logging
from cloudinit import subp, util
from cloudinit.distros.parsers import networkmanager_conf, resolv_conf
-from cloudinit.net import network_state
-
-from . import renderer
-from .network_state import (
+from cloudinit.net import (
IPV6_DYNAMIC_TYPES,
- is_ipv6_addr,
+ is_ipv6_address,
net_prefix_to_ipv4_mask,
subnet_is_ipv6,
)
+from cloudinit.net.network_state import NetworkState
+
+from . import renderer
LOG = logging.getLogger(__name__)
KNOWN_DISTROS = [
@@ -34,7 +33,6 @@ KNOWN_DISTROS = [
"suse",
"virtuozzo",
]
-NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
def _make_header(sep="#"):
@@ -66,24 +64,6 @@ def _quote_value(value):
return value
-def enable_ifcfg_rh(path):
- """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present"""
- config = ConfigObj(path)
- if "main" in config:
- if "plugins" in config["main"]:
- if "ifcfg-rh" in config["main"]["plugins"]:
- return
- else:
- config["main"]["plugins"] = []
-
- if isinstance(config["main"]["plugins"], list):
- config["main"]["plugins"].append("ifcfg-rh")
- else:
- config["main"]["plugins"] = [config["main"]["plugins"], "ifcfg-rh"]
- config.write()
- LOG.debug("Enabled ifcfg-rh NetworkManager plugins")
-
-
class ConfigMap(object):
"""Sysconfig like dictionary object."""
@@ -196,7 +176,6 @@ class Route(ConfigMap):
index = key.replace("ADDRESS", "")
address_value = str(self._conf[key])
- netmask_value = str(self._conf["NETMASK" + index])
gateway_value = str(self._conf["GATEWAY" + index])
# only accept combinations:
@@ -206,6 +185,7 @@ class Route(ConfigMap):
# do not add ipv4 routes if proto is ipv6
# (this array will contain a mix of ipv4 and ipv6)
if proto == "ipv4" and not self.is_ipv6_route(address_value):
+ netmask_value = str(self._conf["NETMASK" + index])
# increase IPv4 index
reindex = reindex + 1
buf.write(
@@ -228,9 +208,7 @@ class Route(ConfigMap):
% ("METRIC" + str(reindex), _quote_value(metric_value))
)
elif proto == "ipv6" and self.is_ipv6_route(address_value):
- prefix_value = network_state.ipv6_mask_to_net_prefix(
- netmask_value
- )
+ prefix_value = str(self._conf[f"PREFIX{index}"])
metric_value = (
"metric " + str(self._conf["METRIC" + index])
if "METRIC" + index in self._conf
@@ -610,7 +588,7 @@ class Renderer(renderer.Renderer):
if "gateway" in subnet and flavor != "suse":
iface_cfg["DEFROUTE"] = True
- if is_ipv6_addr(subnet["gateway"]):
+ if is_ipv6_address(subnet["gateway"]):
iface_cfg["IPV6_DEFAULTGW"] = subnet["gateway"]
else:
iface_cfg["GATEWAY"] = subnet["gateway"]
@@ -640,7 +618,9 @@ class Renderer(renderer.Renderer):
for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
subnet_type = subnet.get("type")
for route in subnet.get("routes", []):
- is_ipv6 = subnet.get("ipv6") or is_ipv6_addr(route["gateway"])
+ is_ipv6 = subnet.get("ipv6") or is_ipv6_address(
+ route["gateway"]
+ )
# Any dynamic configuration method, slaac, dhcpv6-stateful/
# stateless should get router information from router RA's.
@@ -657,12 +637,9 @@ class Renderer(renderer.Renderer):
"Duplicate declaration of default "
"route found for interface '%s'" % (iface_cfg.name)
)
- # NOTE(harlowja): ipv6 and ipv4 default gateways
- gw_key = "GATEWAY0"
- nm_key = "NETMASK0"
- addr_key = "ADDRESS0"
- # The owning interface provides the default route.
- #
+ # NOTE that instead of defining the route0 settings,
+ # the owning interface provides the default route.
+
# TODO(harlowja): add validation that no other iface has
# also provided the default route?
iface_cfg["DEFROUTE"] = True
@@ -679,21 +656,19 @@ class Renderer(renderer.Renderer):
iface_cfg["METRIC"] = route["metric"]
else:
- gw_key = "GATEWAY%s" % route_cfg.last_idx
- nm_key = "NETMASK%s" % route_cfg.last_idx
- addr_key = "ADDRESS%s" % route_cfg.last_idx
- metric_key = "METRIC%s" % route_cfg.last_idx
- route_cfg.last_idx += 1
# add default routes only to ifcfg files, not
# to route-* or route6-*
- for (old_key, new_key) in [
- ("gateway", gw_key),
- ("metric", metric_key),
- ("netmask", nm_key),
- ("network", addr_key),
+ for old_key, new_name in [
+ ("gateway", "GATEWAY"),
+ ("metric", "METRIC"),
+ ("prefix", "PREFIX"),
+ ("netmask", "NETMASK"),
+ ("network", "ADDRESS"),
]:
if old_key in route:
+ new_key = f"{new_name}{route_cfg.last_idx}"
route_cfg[new_key] = route[old_key]
+ route_cfg.last_idx += 1
@classmethod
def _render_bonding_opts(cls, iface_cfg, iface, flavor):
@@ -970,7 +945,7 @@ class Renderer(renderer.Renderer):
"""Given state, return /etc/sysconfig files + contents"""
if not templates:
templates = cls.templates
- iface_contents = {}
+ iface_contents: Mapping[str, NetInterface] = {}
for iface in network_state.iter_interfaces():
if iface["type"] == "loopback":
continue
@@ -1003,7 +978,9 @@ class Renderer(renderer.Renderer):
contents[cpath] = iface_cfg.routes.to_string(proto)
return contents
- def render_network_state(self, network_state, templates=None, target=None):
+ def render_network_state(
+ self, network_state: NetworkState, templates=None, target=None
+ ):
if not templates:
templates = self.templates
file_mode = 0o644
@@ -1032,8 +1009,6 @@ class Renderer(renderer.Renderer):
netrules_content = self._render_persistent_net(network_state)
netrules_path = subp.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
- if available_nm(target=target):
- enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE))
sysconfig_path = subp.target_path(target, templates.get("control"))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
@@ -1063,14 +1038,9 @@ def _supported_vlan_names(rdev, vid):
def available(target=None):
- sysconfig = available_sysconfig(target=target)
- nm = available_nm(target=target)
- return util.system_info()["variant"] in KNOWN_DISTROS and any(
- [nm, sysconfig]
- )
-
+ if not util.system_info()["variant"] in KNOWN_DISTROS:
+ return False
-def available_sysconfig(target=None):
expected = ["ifup", "ifdown"]
search = ["/sbin", "/usr/sbin"]
for p in expected:
@@ -1087,10 +1057,4 @@ def available_sysconfig(target=None):
return False
-def available_nm(target=None):
- if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)):
- return False
- return True
-
-
# vi: ts=4 expandtab
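Illustration of the route rendering change: IPv6 routes now consume an
explicit PREFIX<index> value instead of deriving one from NETMASK<index>,
and non-default routes emit per-route indexed keys. A minimal sketch,
assuming a hypothetical route dict in the shape consumed by the loop in
_render_subnet_routes, and assuming last_idx advances once per rendered
route:

    route = {"network": "2001:db8::", "prefix": 64, "gateway": "2001:db8::1"}
    # With route_cfg.last_idx == 0, this renders (in route6-<iface> terms):
    #   ADDRESS0=2001:db8::
    #   PREFIX0=64            # used directly; no ipv6_mask_to_net_prefix()
    #   GATEWAY0=2001:db8::1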
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index e163e168..e163e168 100755..100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
index ba0e88c8..eeb6f82b 100644
--- a/cloudinit/safeyaml.py
+++ b/cloudinit/safeyaml.py
@@ -4,14 +4,117 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+from collections import namedtuple
+from itertools import chain
+from typing import Any, Dict, List, Tuple
+
import yaml
YAMLError = yaml.YAMLError
+# SchemaPathMarks track the path to an element within a loaded YAML file.
+# The start_mark and end_mark contain the row and column indicators
+# which represent the coordinates where the schema element begins and ends.
+SchemaPathMarks = namedtuple(
+ "SchemaPathMarks", ("path", "start_mark", "end_mark")
+)
+
class _CustomSafeLoader(yaml.SafeLoader):
def construct_python_unicode(self, node):
- return self.construct_scalar(node)
+ return super().construct_scalar(node)
+
+
+class _CustomSafeLoaderWithMarks(yaml.SafeLoader):
+ """A loader which provides line and column start and end marks for YAML.
+
+ If the loaded YAML represents a dictionary, get_single_data will inject
+ a top-level "schemamarks" key into that dictionary, which call-sites can
+ use to map schema paths to source line numbers when annotating YAML
+ files with errors.
+
+ The schemamarks key is a dictionary where each key is a dot-delimited path
+ into the YAML object. Each dot represents an element that is nested under
+ a parent and list items are represented with the format
+ `<parent>.<list-index>`.
+
+ The values in schemamarks will be the line number in the original content
+ where the YAML element begins, to aid annotation when schema errors are
+ encountered.
+
+ The example YAML shows expected schemamarks for both dicts and lists:
+
+ one: val1
+ two:
+ subtwo: val2
+ three: [val3, val4]
+
+ schemamarks == {
+ "one": 1, "two": 2, "two.subtwo": 3, "three": 4, "three.0": 4,
+ "three.1": 4
+ }
+ """
+
+ def __init__(self, stream):
+ super().__init__(stream)
+ self.schemamarks_by_line = {} # type: Dict[int, List[SchemaPathMarks]]
+
+ def _get_nested_path_prefix(self, node):
+ if node.start_mark.line in self.schemamarks_by_line:
+ return f"{self.schemamarks_by_line[node.start_mark.line][0][0]}."
+ for _line_num, schema_marks in sorted(
+ self.schemamarks_by_line.items(), reverse=True
+ ):
+ for mark in schema_marks[::-1]:
+ if ( # Is the node within the scope of the furthest mark
+ node.start_mark.line >= mark.start_mark.line
+ and node.start_mark.column >= mark.start_mark.column
+ and node.end_mark.line <= mark.end_mark.line
+ and node.end_mark.column <= mark.end_mark.column
+ ):
+ return f"{mark.path}."
+ return ""
+
+ def construct_mapping(self, node):
+ mapping = super().construct_mapping(node)
+ nested_path_prefix = self._get_nested_path_prefix(node)
+ for key_node, value_node in node.value:
+ node_key_path = f"{nested_path_prefix}{key_node.value}"
+ line_num = key_node.start_mark.line
+ mark = SchemaPathMarks(
+ node_key_path, key_node.start_mark, value_node.end_mark
+ )
+ if line_num not in self.schemamarks_by_line:
+ self.schemamarks_by_line[line_num] = [mark]
+ else:
+ self.schemamarks_by_line[line_num].append(mark)
+ return mapping
+
+ def construct_sequence(self, node, deep=False):
+ sequence = super().construct_sequence(node, deep=True)
+ nested_path_prefix = self._get_nested_path_prefix(node)
+ for index, sequence_item in enumerate(node.value):
+ line_num = sequence_item.start_mark.line
+ node_key_path = f"{nested_path_prefix}{index}"
+ marks = SchemaPathMarks(
+ node_key_path, sequence_item.start_mark, sequence_item.end_mark
+ )
+ if line_num not in self.schemamarks_by_line:
+ self.schemamarks_by_line[line_num] = [marks]
+ else:
+ self.schemamarks_by_line[line_num].append(marks)
+ return sequence
+
+ def get_single_data(self):
+ data = super().get_single_data()
+ if isinstance(data, dict): # valid cloud-config schema is a dict
+ data["schemamarks"] = dict(
+ [
+ (v.path, v.start_mark.line + 1) # 1-based human-readable
+ for v in chain(*self.schemamarks_by_line.values())
+ ]
+ )
+ return data
_CustomSafeLoader.add_constructor(
@@ -27,6 +130,27 @@ class NoAliasSafeDumper(yaml.dumper.SafeDumper):
return True
+def load_with_marks(blob) -> Tuple[Any, Dict[str, int]]:
+ """Perform YAML SafeLoad and track start and end marks during parse.
+
+ JSON schema errors come with an encoded object path such as:
+ <key1>.<key2>.<list_item_index>
+
+ The YAML loader needs to preserve a mapping of schema path to line and
+ column marks so the original content can be annotated with JSON schema
+ error marks for the command:
+ cloud-init devel schema --annotate
+ """
+ result = yaml.load(blob, Loader=_CustomSafeLoaderWithMarks)
+ if not isinstance(result, dict):
+ schemamarks = {}
+ else:
+ schemamarks = result.pop("schemamarks")
+ return result, schemamarks
+
+
def load(blob):
return yaml.load(blob, Loader=_CustomSafeLoader)
@@ -41,7 +165,7 @@ def dumps(obj, explicit_start=True, explicit_end=True, noalias=False):
explicit_start=explicit_start,
explicit_end=explicit_end,
default_flow_style=False,
- Dumper=(NoAliasSafeDumper if noalias else yaml.dumper.Dumper),
+ Dumper=(NoAliasSafeDumper if noalias else yaml.dumper.SafeDumper),
)
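A minimal usage sketch for the marks-aware loader, reusing the example from
the _CustomSafeLoaderWithMarks docstring (paths map to 1-based line numbers):

    from cloudinit import safeyaml

    blob = "one: val1\ntwo:\n  subtwo: val2\nthree: [val3, val4]\n"
    cfg, marks = safeyaml.load_with_marks(blob)
    # marks == {"one": 1, "two": 2, "two.subtwo": 3,
    #           "three": 4, "three.0": 4, "three.1": 4}
    assert marks["two.subtwo"] == 3
    assert "schemamarks" not in cfg  # popped before returning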
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 294bcb4a..d1bec85c 100755..100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -6,7 +6,6 @@
import base64
import crypt
-import datetime
import functools
import os
import os.path
@@ -24,7 +23,12 @@ from cloudinit import log as logging
from cloudinit import net, sources, ssh_util, subp, util
from cloudinit.event import EventScope, EventType
from cloudinit.net import device_driver
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.net.dhcp import (
+ EphemeralDHCPv4,
+ NoDHCPLeaseError,
+ NoDHCPLeaseInterfaceError,
+ NoDHCPLeaseMissingDhclientError,
+)
from cloudinit.reporting import events
from cloudinit.sources.helpers import netlink
from cloudinit.sources.helpers.azure import (
@@ -35,6 +39,7 @@ from cloudinit.sources.helpers.azure import (
build_minimal_ovf,
dhcp_log_cb,
get_boot_telemetry,
+ get_ip_from_lease_value,
get_metadata_from_fabric,
get_system_info,
is_byte_swapped,
@@ -52,11 +57,9 @@ DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
# azure systems will always have a resource disk, and 66-azure-ephemeral.rules
# ensures that it gets linked to this path.
RESOURCE_DISK_PATH = "/dev/disk/cloud/azure_resource"
-LEASE_FILE = "/var/lib/dhcp/dhclient.eth0.leases"
DEFAULT_FS = "ext4"
# DMI chassis-asset-tag is set static for all azure instances
AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77"
-REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
AGENT_SEED_DIR = "/var/lib/waagent"
DEFAULT_PROVISIONING_ISO_DEV = "/dev/sr0"
@@ -205,7 +208,7 @@ def get_hv_netvsc_macs_normalized() -> List[str]:
def execute_or_debug(cmd, fail_ret=None) -> str:
try:
- return subp.subp(cmd)[0] # type: ignore
+ return subp.subp(cmd).stdout # type: ignore
except subp.ProcessExecutionError:
LOG.debug("Failed to execute: %s", " ".join(cmd))
return fail_ret
@@ -267,7 +270,6 @@ def get_resource_disk_on_freebsd(port_id) -> Optional[str]:
# update the FreeBSD specific information
if util.is_FreeBSD():
- LEASE_FILE = "/var/db/dhclient.leases.hn0"
DEFAULT_FS = "freebsd-ufs"
res_disk = get_resource_disk_on_freebsd(1)
if res_disk is not None:
@@ -281,7 +283,6 @@ if util.is_FreeBSD():
BUILTIN_DS_CONFIG = {
"data_dir": AGENT_SEED_DIR,
"disk_aliases": {"ephemeral0": RESOURCE_DISK_PATH},
- "dhclient_lease_file": LEASE_FILE,
"apply_network_config": True, # Use IMDS published network configuration
}
# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
@@ -327,7 +328,6 @@ class DataSourceAzure(sources.DataSource):
self.ds_cfg = util.mergemanydict(
[util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]
)
- self.dhclient_lease_file = self.ds_cfg.get("dhclient_lease_file")
self._iso_dev = None
self._network_config = None
self._ephemeral_dhcp_ctx = None
@@ -358,7 +358,11 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def _setup_ephemeral_networking(
- self, *, iface: Optional[str] = None, timeout_minutes: int = 5
+ self,
+ *,
+ iface: Optional[str] = None,
+ retry_sleep: int = 1,
+ timeout_minutes: int = 5,
) -> None:
"""Setup ephemeral networking.
@@ -376,30 +380,59 @@ class DataSourceAzure(sources.DataSource):
)
LOG.debug("Requested ephemeral networking (iface=%s)", iface)
-
- start = datetime.datetime.utcnow()
- timeout = start + datetime.timedelta(minutes=timeout_minutes)
-
self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
iface=iface, dhcp_log_func=dhcp_log_cb
)
lease = None
+ timeout = timeout_minutes * 60 + time()
with events.ReportEventStack(
name="obtain-dhcp-lease",
description="obtain dhcp lease",
parent=azure_ds_reporter,
):
- while datetime.datetime.utcnow() < timeout:
+ while lease is None:
try:
lease = self._ephemeral_dhcp_ctx.obtain_lease()
- break
+ except NoDHCPLeaseInterfaceError:
+ # Interface not found, continue after sleeping 1 second.
+ report_diagnostic_event(
+ "Interface not found for DHCP", logger_func=LOG.warning
+ )
+ except NoDHCPLeaseMissingDhclientError:
+ # No dhclient, no point in retrying.
+ report_diagnostic_event(
+ "dhclient executable not found", logger_func=LOG.error
+ )
+ self._ephemeral_dhcp_ctx = None
+ raise
except NoDHCPLeaseError:
- continue
+ # Typical DHCP failure, continue after sleeping 1 second.
+ report_diagnostic_event(
+ "Failed to obtain DHCP lease (iface=%s)" % iface,
+ logger_func=LOG.error,
+ )
+ except subp.ProcessExecutionError as error:
+ # udevadm settle, ip link set dev eth0 up, etc.
+ report_diagnostic_event(
+ "Command failed: "
+ "cmd=%r stderr=%r stdout=%r exit_code=%s"
+ % (
+ error.cmd,
+ error.stderr,
+ error.stdout,
+ error.exit_code,
+ ),
+ logger_func=LOG.error,
+ )
+
+ # Sleep before retrying, otherwise break if we're past timeout.
+ if lease is None and time() + retry_sleep < timeout:
+ sleep(retry_sleep)
+ else:
+ break
if lease is None:
- msg = "Failed to obtain DHCP lease (iface=%s)" % iface
- report_diagnostic_event(msg, logger_func=LOG.error)
self._ephemeral_dhcp_ctx = None
raise NoDHCPLeaseError()
else:
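The loop above replaces datetime arithmetic with a plain deadline check; the
pattern, reduced to a standalone sketch (not cloud-init API; exception
handling collapsed to a catch-all):

    from time import sleep, time

    def obtain_with_deadline(attempt, timeout_minutes=5, retry_sleep=1):
        """Retry attempt() until it returns a value or the deadline passes."""
        deadline = timeout_minutes * 60 + time()
        result = None
        while result is None:
            try:
                result = attempt()
            except Exception:
                pass  # report/log, then fall through to the deadline check
            if result is None and time() + retry_sleep < deadline:
                sleep(retry_sleep)
            else:
                break
        return result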
@@ -408,7 +441,9 @@ class DataSourceAzure(sources.DataSource):
# Update wireserver IP from DHCP options.
if "unknown-245" in lease:
- self._wireserver_endpoint = lease["unknown-245"]
+ self._wireserver_endpoint = get_ip_from_lease_value(
+ lease["unknown-245"]
+ )
@azure_ds_telemetry_reporter
def _teardown_ephemeral_networking(self) -> None:
@@ -451,50 +486,42 @@ class DataSourceAzure(sources.DataSource):
cfg = {}
files = {}
- if os.path.isfile(REPROVISION_MARKER_FILE):
- metadata_source = "IMDS"
- report_diagnostic_event(
- "Reprovision marker file already present "
- "before crawling Azure metadata: %s" % REPROVISION_MARKER_FILE,
- logger_func=LOG.debug,
- )
- else:
- for src in list_possible_azure_ds(self.seed_dir, ddir):
- try:
- if src.startswith("/dev/"):
- if util.is_FreeBSD():
- md, userdata_raw, cfg, files = util.mount_cb(
- src, load_azure_ds_dir, mtype="udf"
- )
- else:
- md, userdata_raw, cfg, files = util.mount_cb(
- src, load_azure_ds_dir
- )
- # save the device for ejection later
- self._iso_dev = src
+ for src in list_possible_azure_ds(self.seed_dir, ddir):
+ try:
+ if src.startswith("/dev/"):
+ if util.is_FreeBSD():
+ md, userdata_raw, cfg, files = util.mount_cb(
+ src, load_azure_ds_dir, mtype="udf"
+ )
else:
- md, userdata_raw, cfg, files = load_azure_ds_dir(src)
- ovf_is_accessible = True
- metadata_source = src
- break
- except NonAzureDataSource:
- report_diagnostic_event(
- "Did not find Azure data source in %s" % src,
- logger_func=LOG.debug,
- )
- continue
- except util.MountFailedError:
- report_diagnostic_event(
- "%s was not mountable" % src, logger_func=LOG.debug
- )
- md = {"local-hostname": ""}
- cfg = {"system_info": {"default_user": {"name": ""}}}
- metadata_source = "IMDS"
- continue
- except BrokenAzureDataSource as exc:
- msg = "BrokenAzureDataSource: %s" % exc
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise sources.InvalidMetaDataException(msg)
+ md, userdata_raw, cfg, files = util.mount_cb(
+ src, load_azure_ds_dir
+ )
+ # save the device for ejection later
+ self._iso_dev = src
+ else:
+ md, userdata_raw, cfg, files = load_azure_ds_dir(src)
+ ovf_is_accessible = True
+ metadata_source = src
+ break
+ except NonAzureDataSource:
+ report_diagnostic_event(
+ "Did not find Azure data source in %s" % src,
+ logger_func=LOG.debug,
+ )
+ continue
+ except util.MountFailedError:
+ report_diagnostic_event(
+ "%s was not mountable" % src, logger_func=LOG.debug
+ )
+ md = {"local-hostname": ""}
+ cfg = {"system_info": {"default_user": {"name": ""}}}
+ metadata_source = "IMDS"
+ continue
+ except BrokenAzureDataSource as exc:
+ msg = "BrokenAzureDataSource: %s" % exc
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
report_diagnostic_event(
"Found provisioning metadata in %s" % metadata_source,
@@ -507,7 +534,7 @@ class DataSourceAzure(sources.DataSource):
# If we require IMDS metadata, try harder to obtain networking, waiting
# for at least 20 minutes. Otherwise only wait 5 minutes.
requires_imds_metadata = bool(self._iso_dev) or not ovf_is_accessible
- timeout_minutes = 5 if requires_imds_metadata else 20
+ timeout_minutes = 20 if requires_imds_metadata else 5
try:
self._setup_ephemeral_networking(timeout_minutes=timeout_minutes)
except NoDHCPLeaseError:
@@ -531,8 +558,6 @@ class DataSourceAzure(sources.DataSource):
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
- self._write_reprovision_marker()
-
if pps_type == PPSType.SAVABLE:
self._wait_for_all_nics_ready()
@@ -902,70 +927,24 @@ class DataSourceAzure(sources.DataSource):
raise
@azure_ds_telemetry_reporter
- def wait_for_link_up(self, ifname):
- """In cases where the link state is still showing down after a nic is
- hot-attached, we can attempt to bring it up by forcing the hv_netvsc
- drivers to query the link state by unbinding and then binding the
- device. This function attempts infinitely until the link is up,
- because we cannot proceed further until we have a stable link."""
-
- if self.distro.networking.try_set_link_up(ifname):
- report_diagnostic_event(
- "The link %s is already up." % ifname, logger_func=LOG.info
- )
- return
-
- LOG.debug("Attempting to bring %s up", ifname)
-
- attempts = 0
- LOG.info("Unbinding and binding the interface %s", ifname)
- while True:
- device_id = net.read_sys_net(ifname, "device/device_id")
- if device_id is False or not isinstance(device_id, str):
- raise RuntimeError("Unable to read device ID: %s" % device_id)
- devicename = device_id.strip("{}")
- util.write_file(
- "/sys/bus/vmbus/drivers/hv_netvsc/unbind", devicename
- )
- util.write_file(
- "/sys/bus/vmbus/drivers/hv_netvsc/bind", devicename
- )
-
- attempts = attempts + 1
+ def wait_for_link_up(
+ self, ifname: str, retries: int = 100, retry_sleep: float = 0.1
+ ):
+ for i in range(retries):
if self.distro.networking.try_set_link_up(ifname):
- msg = "The link %s is up after %s attempts" % (
- ifname,
- attempts,
+ report_diagnostic_event(
+ "The link %s is up." % ifname, logger_func=LOG.info
)
- report_diagnostic_event(msg, logger_func=LOG.info)
- return
-
- if attempts % 10 == 0:
- msg = "Link is not up after %d attempts to rebind" % attempts
- report_diagnostic_event(msg, logger_func=LOG.info)
- LOG.info(msg)
-
- # It could take some time after rebind for the interface to be up.
- # So poll for the status for some time before attempting to rebind
- # again.
- sleep_duration = 0.5
- max_status_polls = 20
- LOG.debug(
- "Polling %d seconds for primary NIC link up after rebind.",
- sleep_duration * max_status_polls,
- )
+ break
- for i in range(0, max_status_polls):
- if self.distro.networking.is_up(ifname):
- msg = (
- "After %d attempts to rebind, link is up after "
- "polling the link status %d times" % (attempts, i)
- )
- report_diagnostic_event(msg, logger_func=LOG.info)
- LOG.debug(msg)
- return
- else:
- sleep(sleep_duration)
+ if (i + 1) < retries:
+ sleep(retry_sleep)
+ else:
+ report_diagnostic_event(
+ "The link %s is not up after %f seconds, continuing anyways."
+ % (ifname, retries * retry_sleep),
+ logger_func=LOG.info,
+ )
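With the new signature the wait is bounded rather than infinite: the defaults
above give a worst case of 100 * 0.1s = 10 seconds before logging and moving
on. Illustrative calls (ds is a hypothetical DataSourceAzure instance):

    ds.wait_for_link_up("eth1")                               # up to ~10s
    ds.wait_for_link_up("eth1", retries=50, retry_sleep=0.2)  # same budget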
@azure_ds_telemetry_reporter
def _create_report_ready_marker(self):
@@ -1091,22 +1070,16 @@ class DataSourceAzure(sources.DataSource):
return is_primary, expected_nic_count
@azure_ds_telemetry_reporter
- def _wait_for_hot_attached_nics(self, nl_sock):
- """Wait until all the expected nics for the vm are hot-attached.
- The expected nic count is obtained by requesting the network metadata
- from IMDS.
- """
- LOG.info("Waiting for nics to be hot-attached")
+ def _wait_for_hot_attached_primary_nic(self, nl_sock):
+ """Wait until the primary nic for the vm is hot-attached."""
+ LOG.info("Waiting for primary nic to be hot-attached")
try:
- # Wait for nics to be attached one at a time, until we know for
- # sure that all nics have been attached.
nics_found = []
primary_nic_found = False
- expected_nic_count = -1
# Wait for netlink nic attach events. After the first nic is
# attached, we are already in the customer vm deployment path and
- # so eerything from then on should happen fast and avoid
+ # so everything from then on should happen fast and avoid
# unnecessary delays wherever possible.
while True:
ifname = None
@@ -1137,17 +1110,13 @@ class DataSourceAzure(sources.DataSource):
# won't be in primary_nic_found = false state for long.
if not primary_nic_found:
LOG.info("Checking if %s is the primary nic", ifname)
- (
- primary_nic_found,
- expected_nic_count,
- ) = self._check_if_nic_is_primary(ifname)
+ primary_nic_found, _ = self._check_if_nic_is_primary(
+ ifname
+ )
- # Exit criteria: check if we've discovered all nics
- if (
- expected_nic_count != -1
- and len(nics_found) >= expected_nic_count
- ):
- LOG.info("Found all the nics for this VM.")
+ # Exit criteria: check if we've discovered primary nic
+ if primary_nic_found:
+ LOG.info("Found primary nic for this VM.")
break
except AssertionError as error:
@@ -1167,7 +1136,7 @@ class DataSourceAzure(sources.DataSource):
self._report_ready_for_pps()
self._teardown_ephemeral_networking()
self._wait_for_nic_detach(nl_sock)
- self._wait_for_hot_attached_nics(nl_sock)
+ self._wait_for_hot_attached_primary_nic(nl_sock)
except netlink.NetlinkCreateSocketError as e:
report_diagnostic_event(str(e), logger_func=LOG.warning)
raise
@@ -1338,7 +1307,7 @@ class DataSourceAzure(sources.DataSource):
logger_func=LOG.debug,
)
report_failure_to_fabric(
- dhcp_opts=self._wireserver_endpoint,
+ endpoint=self._wireserver_endpoint,
description=description,
)
return True
@@ -1361,7 +1330,7 @@ class DataSourceAzure(sources.DataSource):
# Reporting failure will fail, but it will emit telemetry.
pass
report_failure_to_fabric(
- dhcp_opts=self._wireserver_endpoint, description=description
+ endpoint=self._wireserver_endpoint, description=description
)
return True
except Exception as e:
@@ -1386,8 +1355,7 @@ class DataSourceAzure(sources.DataSource):
"""
try:
data = get_metadata_from_fabric(
- fallback_lease_file=None,
- dhcp_opts=self._wireserver_endpoint,
+ endpoint=self._wireserver_endpoint,
iso_dev=self._iso_dev,
pubkey_info=pubkey_info,
)
@@ -1415,7 +1383,7 @@ class DataSourceAzure(sources.DataSource):
def _determine_pps_type(self, ovf_cfg: dict, imds_md: dict) -> PPSType:
"""Determine PPS type using OVF, IMDS data, and reprovision marker."""
- if os.path.isfile(REPROVISION_MARKER_FILE):
+ if os.path.isfile(REPORTED_READY_MARKER_FILE):
pps_type = PPSType.UNKNOWN
elif (
ovf_cfg.get("PreprovisionedVMType", None) == PPSType.SAVABLE.value
@@ -1437,16 +1405,6 @@ class DataSourceAzure(sources.DataSource):
)
return pps_type
- def _write_reprovision_marker(self):
- """Write reprovision marker file in case system is rebooted."""
- LOG.info(
- "Creating a marker file to poll imds: %s", REPROVISION_MARKER_FILE
- )
- util.write_file(
- REPROVISION_MARKER_FILE,
- "{pid}: {time}\n".format(pid=os.getpid(), time=time()),
- )
-
@azure_ds_telemetry_reporter
def _reprovision(self):
"""Initiate the reprovisioning workflow.
@@ -1484,7 +1442,6 @@ class DataSourceAzure(sources.DataSource):
def _cleanup_markers(self):
"""Cleanup any marker files."""
util.del_file(REPORTED_READY_MARKER_FILE)
- util.del_file(REPROVISION_MARKER_FILE)
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index de71c3e9..7d702137 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -10,7 +10,7 @@ from base64 import b64decode
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import sources
-from cloudinit.cs_utils import SERIAL_PORT, Cepko
+from cloudinit.sources.helpers.cloudsigma import SERIAL_PORT, Cepko
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index a742a5e6..32cc078b 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -17,12 +17,12 @@ import time
from socket import gaierror, getaddrinfo, inet_ntoa
from struct import pack
-from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
from cloudinit import sources, subp
from cloudinit import url_helper as uhelp
from cloudinit import util
from cloudinit.net import dhcp
+from cloudinit.sources.helpers import ec2
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index f7c58b12..e47e5820 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -149,6 +149,14 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
+ vd2 = results.get("vendordata2")
+ self.vendordata2_pure = vd2
+ try:
+ self.vendordata2_raw = sources.convert_vendordata(vd2)
+ except ValueError as e:
+ LOG.warning("Invalid content in vendor-data2: %s", e)
+ self.vendordata2_raw = None
+
# network_config is an /etc/network/interfaces formated file and is
# obsolete compared to networkdata (from network_data.json) but both
# might be present.
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 03b3870c..9c525d3d 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -13,13 +13,13 @@ import os
import time
from cloudinit import dmi
-from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
from cloudinit import net, sources
from cloudinit import url_helper as uhelp
from cloudinit import util, warnings
from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.sources.helpers import ec2
LOG = logging.getLogger(__name__)
@@ -55,14 +55,19 @@ class DataSourceEc2(sources.DataSource):
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
- metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+ metadata_urls = [
+ "http://169.254.169.254",
+ "http://[fd00:ec2::254]",
+ "http://instance-data.:8773",
+ ]
# The minimum supported metadata_version from the ec2 metadata apis
min_metadata_version = "2009-04-04"
# Priority ordered list of additional metadata versions which will be tried
- # for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ["2018-09-24", "2016-09-02"]
+ # for extended metadata content. IPv6 support comes in 2016-09-02.
+ # Tags support comes in 2021-03-23.
+ extended_metadata_versions = ["2021-03-23", "2018-09-24", "2016-09-02"]
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -252,6 +257,7 @@ class DataSourceEc2(sources.DataSource):
exception_cb=self._imds_exception_cb,
request_method=request_method,
headers_redact=AWS_TOKEN_REDACT,
+ connect_synchronously=False,
)
except uhelp.UrlError:
# We use the raised exception to interupt the retry loop.
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
index cc5136d7..23478e9e 100644
--- a/cloudinit/sources/DataSourceExoscale.py
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -3,11 +3,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import dmi
-from cloudinit import ec2_utils as ec2
-from cloudinit import helpers
+from cloudinit import dmi, helpers
from cloudinit import log as logging
from cloudinit import sources, url_helper, util
+from cloudinit.sources.helpers import ec2
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index 071ea87c..640348f4 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -71,7 +71,10 @@ def generate_fallback_network_config() -> dict:
err,
)
return network_v1
- if virt_type.strip() == "kvm": # instance.type VIRTUAL-MACHINE
+ if virt_type.strip() in (
+ "kvm",
+ "qemu",
+ ): # instance.type VIRTUAL-MACHINE
arch = util.system_info()["uname"][4]
if arch == "ppc64le":
network_v1["config"][0]["name"] = "enp0s5"
@@ -106,34 +109,6 @@ class LXDSocketAdapter(HTTPAdapter):
return SocketConnectionPool(LXD_SOCKET_PATH)
-def _maybe_remove_top_network(cfg):
- """If network-config contains top level 'network' key, then remove it.
-
- Some providers of network configuration may provide a top level
- 'network' key (LP: #1798117) even though it is not necessary.
-
- Be friendly and remove it if it really seems so.
-
- Return the original value if no change or the updated value if changed."""
- if "network" not in cfg:
- return cfg
- network_val = cfg["network"]
- bmsg = "Top level network key in network-config %s: %s"
- if not isinstance(network_val, dict):
- LOG.debug(bmsg, "was not a dict", cfg)
- return cfg
- if len(list(cfg.keys())) != 1:
- LOG.debug(bmsg, "had multiple top level keys", cfg)
- return cfg
- if network_val.get("config") == "disabled":
- LOG.debug(bmsg, "was config/disabled", cfg)
- elif not all(("config" in network_val, "version" in network_val)):
- LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
- return cfg
- LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
- return network_val
-
-
def _raw_instance_data_to_dict(metadata_type: str, metadata_value) -> dict:
"""Convert raw instance data from str, bytes, YAML to dict
@@ -208,10 +183,8 @@ class DataSourceLXD(sources.DataSource):
if "user-data" in self._crawled_metadata:
self.userdata_raw = self._crawled_metadata["user-data"]
if "network-config" in self._crawled_metadata:
- self._network_config = _maybe_remove_top_network(
- _raw_instance_data_to_dict(
- "network-config", self._crawled_metadata["network-config"]
- )
+ self._network_config = _raw_instance_data_to_dict(
+ "network-config", self._crawled_metadata["network-config"]
)
if "vendor-data" in self._crawled_metadata:
self.vendordata_raw = self._crawled_metadata["vendor-data"]
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 56559630..fba6aaae 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -331,35 +331,6 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
return True
-def _maybe_remove_top_network(cfg):
- """If network-config contains top level 'network' key, then remove it.
-
- Some providers of network configuration may provide a top level
- 'network' key (LP: #1798117) even though it is not necessary.
-
- Be friendly and remove it if it really seems so.
-
- Return the original value if no change or the updated value if changed."""
- nullval = object()
- network_val = cfg.get("network", nullval)
- if network_val is nullval:
- return cfg
- bmsg = "Top level network key in network-config %s: %s"
- if not isinstance(network_val, dict):
- LOG.debug(bmsg, "was not a dict", cfg)
- return cfg
- if len(list(cfg.keys())) != 1:
- LOG.debug(bmsg, "had multiple top level keys", cfg)
- return cfg
- if network_val.get("config") == "disabled":
- LOG.debug(bmsg, "was config/disabled", cfg)
- elif not all(("config" in network_val, "version" in network_val)):
- LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
- return cfg
- LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
- return network_val
-
-
def _merge_new_seed(cur, seeded):
ret = cur.copy()
@@ -369,9 +340,7 @@ def _merge_new_seed(cur, seeded):
ret["meta-data"] = util.mergemanydict([cur["meta-data"], newmd])
if seeded.get("network-config"):
- ret["network-config"] = _maybe_remove_top_network(
- util.load_yaml(seeded.get("network-config"))
- )
+ ret["network-config"] = util.load_yaml(seeded.get("network-config"))
if "user-data" in seeded:
ret["user-data"] = seeded["user-data"]
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index 6d81be1e..3fd8d753 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -16,6 +16,7 @@ Notes:
import base64
from collections import namedtuple
from contextlib import suppress as noop
+from typing import Tuple
from cloudinit import dmi
from cloudinit import log as logging
@@ -102,11 +103,11 @@ class DataSourceOracle(sources.DataSource):
dsname = "Oracle"
system_uuid = None
vendordata_pure = None
- network_config_sources = (
- sources.NetworkConfigSource.cmdline,
- sources.NetworkConfigSource.system_cfg,
- sources.NetworkConfigSource.ds,
- sources.NetworkConfigSource.initramfs,
+ network_config_sources: Tuple[sources.NetworkConfigSource, ...] = (
+ sources.NetworkConfigSource.CMD_LINE,
+ sources.NetworkConfigSource.DS,
+ sources.NetworkConfigSource.INITRAMFS,
+ sources.NetworkConfigSource.SYSTEM_CFG,
)
_network_config = sources.UNSET
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 8e5dd82c..c47a8bf5 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -254,8 +254,14 @@ class DataSourceScaleway(sources.DataSource):
{
"type": "static",
"address": "%s" % self.metadata["ipv6"]["address"],
- "gateway": "%s" % self.metadata["ipv6"]["gateway"],
"netmask": "%s" % self.metadata["ipv6"]["netmask"],
+ "routes": [
+ {
+ "network": "::",
+ "prefix": "0",
+ "gateway": "%s" % self.metadata["ipv6"]["gateway"],
+ }
+ ],
}
]
netcfg["subnets"] = subnets
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 88028cfa..fff760f1 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,7 +13,8 @@ import copy
import json
import os
from collections import namedtuple
-from typing import Dict, List # noqa: F401
+from enum import Enum, unique
+from typing import Dict, List, Tuple
from cloudinit import dmi, importer
from cloudinit import log as logging
@@ -67,13 +68,22 @@ CLOUD_ID_REGION_PREFIX_MAP = {
"china": ("azure-china", lambda c: c == "azure"), # only change azure
}
-# NetworkConfigSource represents the canonical list of network config sources
-# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton
-# namedtuple as an enum; see https://stackoverflow.com/a/6971002)
-_NETCFG_SOURCE_NAMES = ("cmdline", "ds", "system_cfg", "fallback", "initramfs")
-NetworkConfigSource = namedtuple("NetworkConfigSource", _NETCFG_SOURCE_NAMES)(
- *_NETCFG_SOURCE_NAMES
-)
+
+@unique
+class NetworkConfigSource(Enum):
+ """
+ Represents the canonical list of network config sources that cloud-init
+ knows about.
+ """
+
+ CMD_LINE = "cmdline"
+ DS = "ds"
+ SYSTEM_CFG = "system_cfg"
+ FALLBACK = "fallback"
+ INITRAMFS = "initramfs"
+
+ def __str__(self) -> str:
+ return self.value
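A quick sketch of the Enum's behavior versus the old namedtuple singleton;
str() preserves the legacy spellings for logs:

    src = NetworkConfigSource.SYSTEM_CFG
    assert str(src) == "system_cfg"              # legacy name for logging
    assert isinstance(src, NetworkConfigSource)  # replaces hasattr() checks
    assert src is NetworkConfigSource("system_cfg")  # lookup by value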
class DatasourceUnpickleUserDataError(Exception):
@@ -177,11 +187,11 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# configuration will be used without considering any that follow.) This
# should always be a subset of the members of NetworkConfigSource with no
# duplicate entries.
- network_config_sources = (
- NetworkConfigSource.cmdline,
- NetworkConfigSource.initramfs,
- NetworkConfigSource.system_cfg,
- NetworkConfigSource.ds,
+ network_config_sources: Tuple[NetworkConfigSource, ...] = (
+ NetworkConfigSource.CMD_LINE,
+ NetworkConfigSource.INITRAMFS,
+ NetworkConfigSource.SYSTEM_CFG,
+ NetworkConfigSource.DS,
)
# read_url_params
@@ -892,7 +902,9 @@ def normalize_pubkey_data(pubkey_data):
return keys
-def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
+def find_source(
+ sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter
+) -> Tuple[DataSource, str]:
ds_list = list_sources(cfg_list, ds_deps, pkg_list)
ds_names = [type_utils.obj_name(f) for f in ds_list]
mode = "network" if DEP_NETWORK in ds_deps else "local"
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index d07dc3c0..4bb8b8db 100755..100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -16,24 +16,14 @@ from typing import List, Optional
from xml.etree import ElementTree
from xml.sax.saxutils import escape
-from cloudinit import (
- distros,
- stages,
- subp,
- temp_utils,
- url_helper,
- util,
- version,
-)
-from cloudinit.net import dhcp
+from cloudinit import distros, subp, temp_utils, url_helper, util, version
from cloudinit.reporting import events
from cloudinit.settings import CFG_BUILTIN
LOG = logging.getLogger(__name__)
-# This endpoint matches the format as found in dhcp lease files, since this
-# value is applied if the endpoint can't be found within a lease file
-DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
+# Default Wireserver endpoint (if not found in DHCP option 245).
+DEFAULT_WIRESERVER_ENDPOINT = "168.63.129.16"
BOOT_EVENT_TYPE = "boot-telemetry"
SYSTEMINFO_EVENT_TYPE = "system-info"
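The old default was the colon-separated hex encoding of the same address as
found in dhclient lease files; an illustrative check of the equivalence,
using the same unpacking as get_ip_from_lease_value below:

    import socket
    import struct

    hex_lease_value = "a8:3f:81:10"  # old DEFAULT_WIRESERVER_ENDPOINT
    packed = struct.pack(">L", int(hex_lease_value.replace(":", ""), 16))
    assert socket.inet_ntoa(packed) == "168.63.129.16"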
@@ -115,7 +105,7 @@ def get_boot_telemetry():
try:
out, _ = subp.subp(
- ["/bin/systemctl", "show", "-p", "UserspaceTimestampMonotonic"],
+ ["systemctl", "show", "-p", "UserspaceTimestampMonotonic"],
capture=True,
)
tsm = None
@@ -140,7 +130,7 @@ def get_boot_telemetry():
try:
out, _ = subp.subp(
[
- "/bin/systemctl",
+ "systemctl",
"show",
"cloud-init-local",
"-p",
@@ -325,43 +315,39 @@ def cd(newdir):
os.chdir(prevdir)
-def _get_dhcp_endpoint_option_name():
- if util.is_FreeBSD():
- azure_endpoint = "option-245"
+def get_ip_from_lease_value(fallback_lease_value):
+ unescaped_value = fallback_lease_value.replace("\\", "")
+ if len(unescaped_value) > 4:
+ hex_string = ""
+ for hex_pair in unescaped_value.split(":"):
+ if len(hex_pair) == 1:
+ hex_pair = "0" + hex_pair
+ hex_string += hex_pair
+ packed_bytes = struct.pack(">L", int(hex_string.replace(":", ""), 16))
else:
- azure_endpoint = "unknown-245"
- return azure_endpoint
+ packed_bytes = unescaped_value.encode("utf-8")
+ return socket.inet_ntoa(packed_bytes)
@azure_ds_telemetry_reporter
-def http_with_retries(url, **kwargs) -> url_helper.UrlResponse:
- """Wrapper around url_helper.readurl() with custom telemetry logging
- that url_helper.readurl() does not provide.
+def http_with_retries(
+ url: str, *, headers: dict, data: Optional[str] = None
+) -> url_helper.UrlResponse:
+ """Readurl wrapper for querying wireserver.
+
+ Retries up to 40 minutes:
+ 240 attempts * (5s timeout + 5s sleep)
"""
max_readurl_attempts = 240
- default_readurl_timeout = 5
+ readurl_timeout = 5
sleep_duration_between_retries = 5
periodic_logging_attempts = 12
- if "timeout" not in kwargs:
- kwargs["timeout"] = default_readurl_timeout
-
- # remove kwargs that cause url_helper.readurl to retry,
- # since we are already implementing our own retry logic.
- if kwargs.pop("retries", None):
- LOG.warning(
- "Ignoring retries kwarg passed in for "
- "communication with Azure endpoint."
- )
- if kwargs.pop("infinite", None):
- LOG.warning(
- "Ignoring infinite kwarg passed in for communication "
- "with Azure endpoint."
- )
-
for attempt in range(1, max_readurl_attempts + 1):
try:
- ret = url_helper.readurl(url, **kwargs)
+ ret = url_helper.readurl(
+ url, headers=headers, data=data, timeout=readurl_timeout
+ )
report_diagnostic_event(
"Successful HTTP request with Azure endpoint %s after "
@@ -808,191 +794,15 @@ class GoalStateHealthReporter:
class WALinuxAgentShim:
- def __init__(self, fallback_lease_file=None, dhcp_options=None):
- LOG.debug(
- "WALinuxAgentShim instantiated, fallback_lease_file=%s",
- fallback_lease_file,
- )
- self.dhcpoptions = dhcp_options
- self._endpoint = None
- self.openssl_manager = None
- self.azure_endpoint_client = None
- self.lease_file = fallback_lease_file
+ def __init__(self, endpoint: str):
+ self.endpoint = endpoint
+ self.openssl_manager: Optional[OpenSSLManager] = None
+ self.azure_endpoint_client: Optional[AzureEndpointHttpClient] = None
def clean_up(self):
if self.openssl_manager is not None:
self.openssl_manager.clean_up()
- @staticmethod
- def _get_hooks_dir():
- _paths = stages.Init()
- return os.path.join(_paths.paths.get_runpath(), "dhclient.hooks")
-
- @property
- def endpoint(self):
- if self._endpoint is None:
- self._endpoint = self.find_endpoint(
- self.lease_file, self.dhcpoptions
- )
- return self._endpoint
-
- @staticmethod
- def get_ip_from_lease_value(fallback_lease_value):
- unescaped_value = fallback_lease_value.replace("\\", "")
- if len(unescaped_value) > 4:
- hex_string = ""
- for hex_pair in unescaped_value.split(":"):
- if len(hex_pair) == 1:
- hex_pair = "0" + hex_pair
- hex_string += hex_pair
- packed_bytes = struct.pack(
- ">L", int(hex_string.replace(":", ""), 16)
- )
- else:
- packed_bytes = unescaped_value.encode("utf-8")
- return socket.inet_ntoa(packed_bytes)
-
- @staticmethod
- @azure_ds_telemetry_reporter
- def _networkd_get_value_from_leases(leases_d=None):
- return dhcp.networkd_get_option_from_leases(
- "OPTION_245", leases_d=leases_d
- )
-
- @staticmethod
- @azure_ds_telemetry_reporter
- def _get_value_from_leases_file(fallback_lease_file):
- leases = []
- try:
- content = util.load_file(fallback_lease_file)
- except IOError as ex:
- LOG.error("Failed to read %s: %s", fallback_lease_file, ex)
- return None
-
- LOG.debug("content is %s", content)
- option_name = _get_dhcp_endpoint_option_name()
- for line in content.splitlines():
- if option_name in line:
- # Example line from Ubuntu
- # option unknown-245 a8:3f:81:10;
- leases.append(line.strip(" ").split(" ", 2)[-1].strip(';\n"'))
- # Return the "most recent" one in the list
- if len(leases) < 1:
- return None
- else:
- return leases[-1]
-
- @staticmethod
- @azure_ds_telemetry_reporter
- def _load_dhclient_json():
- dhcp_options = {}
- hooks_dir = WALinuxAgentShim._get_hooks_dir()
- if not os.path.exists(hooks_dir):
- LOG.debug("%s not found.", hooks_dir)
- return None
- hook_files = [
- os.path.join(hooks_dir, x) for x in os.listdir(hooks_dir)
- ]
- for hook_file in hook_files:
- try:
- name = os.path.basename(hook_file).replace(".json", "")
- dhcp_options[name] = json.loads(util.load_file((hook_file)))
- except ValueError as e:
- raise ValueError(
- "{_file} is not valid JSON data".format(_file=hook_file)
- ) from e
- return dhcp_options
-
- @staticmethod
- @azure_ds_telemetry_reporter
- def _get_value_from_dhcpoptions(dhcp_options):
- if dhcp_options is None:
- return None
- # the MS endpoint server is given to us as DHPC option 245
- _value = None
- for interface in dhcp_options:
- _value = dhcp_options[interface].get("unknown_245", None)
- if _value is not None:
- LOG.debug("Endpoint server found in dhclient options")
- break
- return _value
-
- @staticmethod
- @azure_ds_telemetry_reporter
- def find_endpoint(fallback_lease_file=None, dhcp245=None):
- """Finds and returns the Azure endpoint using various methods.
-
- The Azure endpoint is searched in the following order:
- 1. Endpoint from dhcp options (dhcp option 245).
- 2. Endpoint from networkd.
- 3. Endpoint from dhclient hook json.
- 4. Endpoint from fallback lease file.
- 5. The default Azure endpoint.
-
- @param fallback_lease_file: Fallback lease file that will be used
- during endpoint search.
- @param dhcp245: dhcp options that will be used during endpoint search.
- @return: Azure endpoint IP address.
- """
- value = None
-
- if dhcp245 is not None:
- value = dhcp245
- LOG.debug("Using Azure Endpoint from dhcp options")
- if value is None:
- report_diagnostic_event(
- "No Azure endpoint from dhcp options. "
- "Finding Azure endpoint from networkd...",
- logger_func=LOG.debug,
- )
- value = WALinuxAgentShim._networkd_get_value_from_leases()
- if value is None:
- # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
- # a dhclient exit hook that calls cloud-init-dhclient-hook
- report_diagnostic_event(
- "No Azure endpoint from networkd. "
- "Finding Azure endpoint from hook json...",
- logger_func=LOG.debug,
- )
- dhcp_options = WALinuxAgentShim._load_dhclient_json()
- value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
- if value is None:
- # Fallback and check the leases file if unsuccessful
- report_diagnostic_event(
- "No Azure endpoint from dhclient logs. "
- "Unable to find endpoint in dhclient logs. "
- "Falling back to check lease files",
- logger_func=LOG.debug,
- )
- if fallback_lease_file is None:
- report_diagnostic_event(
- "No fallback lease file was specified.",
- logger_func=LOG.warning,
- )
- value = None
- else:
- report_diagnostic_event(
- "Looking for endpoint in lease file %s"
- % fallback_lease_file,
- logger_func=LOG.debug,
- )
- value = WALinuxAgentShim._get_value_from_leases_file(
- fallback_lease_file
- )
- if value is None:
- value = DEFAULT_WIRESERVER_ENDPOINT
- report_diagnostic_event(
- "No lease found; using default endpoint: %s" % value,
- logger_func=LOG.warning,
- )
-
- endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
- report_diagnostic_event(
- "Azure endpoint found at %s" % endpoint_ip_address,
- logger_func=LOG.debug,
- )
- return endpoint_ip_address
-
@azure_ds_telemetry_reporter
def eject_iso(self, iso_dev) -> None:
try:
@@ -1088,7 +898,7 @@ class WALinuxAgentShim:
description="retrieve goalstate",
parent=azure_ds_reporter,
):
- response = self.azure_endpoint_client.get(url)
+ response = self.azure_endpoint_client.get(url) # type: ignore
except Exception as e:
report_diagnostic_event(
"failed to register with Azure and fetch GoalState XML: %s"
@@ -1112,7 +922,7 @@ class WALinuxAgentShim:
try:
goal_state = GoalState(
unparsed_goal_state_xml,
- self.azure_endpoint_client,
+ self.azure_endpoint_client, # type: ignore
need_certificate,
)
except Exception as e:
@@ -1161,7 +971,11 @@ class WALinuxAgentShim:
@return: A list of the VM user's authorized pubkey values.
"""
ssh_keys = []
- if goal_state.certificates_xml is not None and pubkey_info is not None:
+ if (
+ goal_state.certificates_xml is not None
+ and pubkey_info is not None
+ and self.openssl_manager is not None
+ ):
LOG.debug("Certificate XML found; parsing out public keys.")
keys_by_fingerprint = self.openssl_manager.parse_certificates(
goal_state.certificates_xml
@@ -1208,11 +1022,11 @@ class WALinuxAgentShim:
@azure_ds_telemetry_reporter
def get_metadata_from_fabric(
- fallback_lease_file=None, dhcp_opts=None, pubkey_info=None, iso_dev=None
+ endpoint: str,
+ pubkey_info: Optional[List[str]] = None,
+ iso_dev: Optional[str] = None,
):
- shim = WALinuxAgentShim(
- fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
- )
+ shim = WALinuxAgentShim(endpoint=endpoint)
try:
return shim.register_with_azure_and_fetch_data(
pubkey_info=pubkey_info, iso_dev=iso_dev
@@ -1222,12 +1036,8 @@ def get_metadata_from_fabric(
@azure_ds_telemetry_reporter
-def report_failure_to_fabric(
- fallback_lease_file=None, dhcp_opts=None, description=None
-):
- shim = WALinuxAgentShim(
- fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
- )
+def report_failure_to_fabric(endpoint: str, description: Optional[str] = None):
+ shim = WALinuxAgentShim(endpoint=endpoint)
if not description:
description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
try:
diff --git a/cloudinit/cs_utils.py b/cloudinit/sources/helpers/cloudsigma.py
index 6db7e117..6db7e117 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/sources/helpers/cloudsigma.py
diff --git a/cloudinit/ec2_utils.py b/cloudinit/sources/helpers/ec2.py
index d4019557..d4019557 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/sources/helpers/ec2.py
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index a42543e4..1f48dac6 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -12,10 +12,10 @@ import copy
import functools
import os
-from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net, sources, subp, url_helper, util
from cloudinit.sources import BrokenMetadata
+from cloudinit.sources.helpers import ec2
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -515,7 +515,7 @@ class MetadataReader(BaseReader):
return url_helper.combine_url(base, *add_ons)
def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(
+ return ec2.get_instance_metadata(
ssl_details=self.ssl_details,
timeout=self.timeout,
retries=self.retries,
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 08763e62..f7d676e8 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -141,7 +141,7 @@ def get_tools_config(section, key, defaultVal):
cmd = ["vmware-toolbox-cmd", "config", "get", section, key]
try:
- (outText, _) = subp.subp(cmd)
+ out = subp.subp(cmd)
except subp.ProcessExecutionError as e:
if e.exit_code == 69:
logger.debug(
@@ -156,7 +156,7 @@ def get_tools_config(section, key, defaultVal):
return defaultVal
retValue = defaultVal
- m = re.match(r"([^=]+)=(.*)", outText)
+ m = re.match(r"([^=]+)=(.*)", out.stdout)
if m:
retValue = m.group(2).strip()
logger.debug("Get tools config: [%s] %s = %s", section, key, retValue)
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index 88a21034..c8fb8420 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -5,9 +5,11 @@
import json
from functools import lru_cache
+from requests import exceptions
+
from cloudinit import dmi
from cloudinit import log as log
-from cloudinit import net, subp, url_helper, util
+from cloudinit import net, netinfo, subp, url_helper, util
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
# Get LOG
@@ -20,26 +22,60 @@ def get_metadata(url, timeout, retries, sec_between, agent):
exception = RuntimeError("Failed to DHCP")
# Seek iface with DHCP
- for iface in net.get_interfaces():
- # Skip dummy, lo interfaces
- if "dummy" in iface[0]:
- continue
- if "lo" == iface[0]:
- continue
+ for iface in get_interface_list():
try:
with EphemeralDHCPv4(
- iface=iface[0], connectivity_url_data={"url": url}
+ iface=iface, connectivity_url_data={"url": url}
):
+ # Check for the metadata route, skip if not there
+ if not check_route(url):
+ continue
+
# Fetch the metadata
v1 = read_metadata(url, timeout, retries, sec_between, agent)
return json.loads(v1)
- except (NoDHCPLeaseError, subp.ProcessExecutionError) as exc:
+ except (
+ NoDHCPLeaseError,
+ subp.ProcessExecutionError,
+ RuntimeError,
+ exceptions.RequestException,
+ ) as exc:
LOG.error("DHCP Exception: %s", exc)
exception = exc
raise exception
+# Get the interface list, sorted, with dummy interfaces filtered out
+def get_interface_list():
+ ifaces = []
+ for iface in net.find_candidate_nics():
+ # Skip dummy
+ if "dummy" in iface:
+ continue
+ ifaces.append(iface)
+
+ return ifaces
+
+
+# Check for /32 route that our dhcp servers inject
+# in order to determine if this is a customer-run dhcp server
+def check_route(url):
+ # Get routes, confirm entry exists
+ routes = netinfo.route_info()
+
+ # If no tools exist, an empty dict is returned
+ if "ipv4" not in routes:
+ return False
+
+ # Parse each route into a more searchable format
+ for route in routes["ipv4"]:
+ if route.get("destination", None) in url:
+ return True
+
+ return False
+
+
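check_route() assumes the dict shape returned by cloudinit.netinfo.route_info();
a hypothetical sketch of the relevant structure (only "destination" is
consulted above):

    routes = {
        "ipv4": [
            {"destination": "169.254.169.254", "gateway": "0.0.0.0",
             "genmask": "255.255.255.255", "iface": "eth0"},
        ],
        "ipv6": [],
    }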
# Read the system information from SMBIOS
def get_sysinfo():
return {
@@ -114,7 +150,12 @@ def get_interface_name(mac):
def generate_network_config(interfaces):
network = {
"version": 1,
- "config": [{"type": "nameserver", "address": ["108.61.10.10"]}],
+ "config": [
+ {
+ "type": "nameserver",
+ "address": ["108.61.10.10", "2001:19f0:300:1704::6"],
+ }
+ ],
}
# Prepare interface 0, public
@@ -124,7 +165,13 @@ def generate_network_config(interfaces):
# Prepare additional interfaces, private
for i in range(1, len(interfaces)):
- private = generate_interface(interfaces[i])
+ interface = interfaces[i]
+
+ # Skip interfaces set not to be configured
+ if interface.get("unconfigured"):
+ continue
+
+ private = generate_interface(interface)
network["config"].append(private)
return network
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 3f17294b..27af6055 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -9,9 +9,9 @@ import os
import pickle
import sys
from collections import namedtuple
-from typing import Dict, Set # noqa: F401
+from typing import Dict, Iterable, List, Optional, Set
-from cloudinit import cloud, config, distros, handlers, helpers, importer
+from cloudinit import cloud, distros, handlers, helpers, importer
from cloudinit import log as logging
from cloudinit import net, sources, type_utils, util
from cloudinit.event import EventScope, EventType, userdata_to_events
@@ -24,12 +24,10 @@ from cloudinit.handlers.shell_script import ShellScriptPartHandler
from cloudinit.handlers.shell_script_by_frequency import (
ShellScriptByFreqPartHandler,
)
-from cloudinit.handlers.upstart_job import UpstartJobPartHandler
from cloudinit.net import cmdline
from cloudinit.reporting import events
from cloudinit.settings import (
CLOUD_CONFIG,
- FREQUENCIES,
PER_ALWAYS,
PER_INSTANCE,
PER_ONCE,
@@ -39,7 +37,6 @@ from cloudinit.sources import NetworkConfigSource
LOG = logging.getLogger(__name__)
-NULL_DATA_SOURCE = None
NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID"
@@ -61,12 +58,12 @@ def update_event_enabled(
case, we only have the data source's `default_update_events`,
so an event that should be enabled in userdata may be denied.
"""
- default_events = (
- datasource.default_update_events
- ) # type: Dict[EventScope, Set[EventType]]
- user_events = userdata_to_events(
+ default_events: Dict[
+ EventScope, Set[EventType]
+ ] = datasource.default_update_events
+ user_events: Dict[EventScope, Set[EventType]] = userdata_to_events(
cfg.get("updates", {})
- ) # type: Dict[EventScope, Set[EventType]]
+ )
# A value in the first will override a value in the second
allowed = util.mergemanydict(
[
@@ -76,6 +73,7 @@ def update_event_enabled(
)
LOG.debug("Allowed events: %s", allowed)
+ scopes: Iterable[EventScope]
if not scope:
scopes = allowed.keys()
else:
@@ -98,17 +96,17 @@ def update_event_enabled(
class Init(object):
- def __init__(self, ds_deps=None, reporter=None):
+ def __init__(self, ds_deps: Optional[List[str]] = None, reporter=None):
if ds_deps is not None:
self.ds_deps = ds_deps
else:
self.ds_deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
# Created on first use
- self._cfg = None
- self._paths = None
- self._distro = None
+ self._cfg: Optional[dict] = None
+ self._paths: Optional[helpers.Paths] = None
+ self._distro: Optional[distros.Distro] = None
# Changed only when a fetch occurs
- self.datasource = NULL_DATA_SOURCE
+ self.datasource: Optional[sources.DataSource] = None
self.ds_restored = False
self._previous_iid = None
@@ -126,7 +124,7 @@ class Init(object):
self._paths = None
self._distro = None
if reset_ds:
- self.datasource = NULL_DATA_SOURCE
+ self.datasource = None
self.ds_restored = False
@property
@@ -141,7 +139,7 @@ class Init(object):
# If we have an active datasource we need to adjust
# said datasource and move its distro/system config
# from whatever it was to a new set...
- if self.datasource is not NULL_DATA_SOURCE:
+ if self.datasource is not None:
self.datasource.distro = self._distro
self.datasource.sys_cfg = self.cfg
return self._distro
@@ -252,7 +250,7 @@ class Init(object):
return _pkl_load(self.paths.get_ipath_cur("obj_pkl"))
def _write_to_cache(self):
- if self.datasource is NULL_DATA_SOURCE:
+ if self.datasource is None:
return False
if util.get_cfg_option_bool(self.cfg, "manual_cache_clean", False):
# The empty file in instance/ dir indicates manual cleaning,
@@ -301,7 +299,7 @@ class Init(object):
return (None, "cache invalid in datasource: %s" % ds)
def _get_data_source(self, existing) -> sources.DataSource:
- if self.datasource is not NULL_DATA_SOURCE:
+ if self.datasource is not None:
return self.datasource
with events.ReportEventStack(
@@ -330,7 +328,7 @@ class Init(object):
self.reporter,
)
LOG.info("Loaded datasource %s - %s", dsname, ds)
- self.datasource = ds # type: sources.DataSource
+ self.datasource = ds
# Ensure we adjust our path members datasource
# now that we have one (thus allowing ipath to be used)
self._reset()
@@ -527,7 +525,6 @@ class Init(object):
ShellScriptByFreqPartHandler(PER_INSTANCE, **opts),
ShellScriptByFreqPartHandler(PER_ONCE, **opts),
BootHookPartHandler(**opts),
- UpstartJobPartHandler(**opts),
]
opts.update(
{"sub_handlers": [cloudconfig_handler, shellscript_handler]}
@@ -757,6 +754,13 @@ class Init(object):
LOG.debug("%s consumption is disabled.", vendor_source)
return
+ if isinstance(enabled, str):
+ LOG.debug(
+ "Use of string '%s' for 'vendor_data:enabled' field "
+ "is deprecated. Use boolean value instead",
+ enabled,
+ )
+
LOG.debug(
"%s will be consumed. disabled_handlers=%s",
vendor_source,
@@ -794,6 +798,16 @@ class Init(object):
# Run the handlers
self._do_handlers(user_data_msg, c_handlers_list, frequency)
+ def _remove_top_level_network_key(self, cfg):
+ """If network-config contains top level 'network' key, then remove it.
+
+ Some providers of network configuration skip the top-level network
+ key, so ensure both methods works.
+ """
+ if cfg and "network" in cfg:
+ return cfg["network"]
+ return cfg
+
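As a quick illustration of the normalization above (a sketch; `init` stands for an instantiated Init object and is this editor's naming, not the diff's), both config shapes reduce to the same dict:

    cfg_a = {"network": {"version": 2, "ethernets": {"eth0": {"dhcp4": True}}}}
    cfg_b = {"version": 2, "ethernets": {"eth0": {"dhcp4": True}}}
    # the wrapped form is unwrapped; the bare form passes through unchanged
    assert init._remove_top_level_network_key(cfg_a) == cfg_b
    assert init._remove_top_level_network_key(cfg_b) == cfg_b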
def _find_networking_config(self):
disable_file = os.path.join(
self.paths.get_cpath("data"), "upgraded-network"
@@ -802,15 +816,15 @@ class Init(object):
return (None, disable_file)
available_cfgs = {
- NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(),
- NetworkConfigSource.initramfs: cmdline.read_initramfs_config(),
- NetworkConfigSource.ds: None,
- NetworkConfigSource.system_cfg: self.cfg.get("network"),
+ NetworkConfigSource.CMD_LINE: cmdline.read_kernel_cmdline_config(),
+ NetworkConfigSource.INITRAMFS: cmdline.read_initramfs_config(),
+ NetworkConfigSource.DS: None,
+ NetworkConfigSource.SYSTEM_CFG: self.cfg.get("network"),
}
if self.datasource and hasattr(self.datasource, "network_config"):
available_cfgs[
- NetworkConfigSource.ds
+ NetworkConfigSource.DS
] = self.datasource.network_config
if self.datasource:
@@ -818,7 +832,7 @@ class Init(object):
else:
order = sources.DataSource.network_config_sources
for cfg_source in order:
- if not hasattr(NetworkConfigSource, cfg_source):
+ if not isinstance(cfg_source, NetworkConfigSource):
LOG.warning(
"data source specifies an invalid network cfg_source: %s",
cfg_source,
@@ -831,7 +845,9 @@ class Init(object):
cfg_source,
)
continue
- ncfg = available_cfgs[cfg_source]
+ ncfg = self._remove_top_level_network_key(
+ available_cfgs[cfg_source]
+ )
if net.is_disabled_cfg(ncfg):
LOG.debug("network config disabled by %s", cfg_source)
return (None, cfg_source)
@@ -839,13 +855,13 @@ class Init(object):
return (ncfg, cfg_source)
return (
self.distro.generate_fallback_config(),
- NetworkConfigSource.fallback,
+ NetworkConfigSource.FALLBACK,
)
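The hunk above relies on NetworkConfigSource now being an enum rather than a class with string attributes; a minimal sketch of the assumed shape (member names are taken from this diff, the values are illustrative):

    from enum import Enum

    class NetworkConfigSource(Enum):
        CMD_LINE = "cmdline"
        DS = "ds"
        SYSTEM_CFG = "system_cfg"
        FALLBACK = "fallback"
        INITRAMFS = "initramfs"

    # validity is now an isinstance() check instead of hasattr()
    assert isinstance(NetworkConfigSource.DS, NetworkConfigSource)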
def _apply_netcfg_names(self, netcfg):
try:
LOG.debug("applying net config names for %s", netcfg)
- self.distro.apply_network_config_names(netcfg)
+ self.distro.networking.apply_network_config_names(netcfg)
except Exception as e:
LOG.warning("Failed to rename devices: %s", e)
@@ -871,15 +887,12 @@ class Init(object):
return
def event_enabled_and_metadata_updated(event_type):
- return (
- update_event_enabled(
- datasource=self.datasource,
- cfg=self.cfg,
- event_source_type=event_type,
- scope=EventScope.NETWORK,
- )
- and self.datasource.update_metadata_if_supported([event_type])
- )
+ return update_event_enabled(
+ datasource=self.datasource,
+ cfg=self.cfg,
+ event_source_type=event_type,
+ scope=EventScope.NETWORK,
+ ) and self.datasource.update_metadata_if_supported([event_type])
def should_run_on_boot_event():
return (
@@ -888,7 +901,7 @@ class Init(object):
)
if (
- self.datasource is not NULL_DATA_SOURCE
+ self.datasource is not None
and not self.is_new_instance()
and not should_run_on_boot_event()
and not event_enabled_and_metadata_updated(EventType.BOOT_LEGACY)
@@ -940,217 +953,6 @@ class Init(object):
return
-class Modules(object):
- def __init__(self, init, cfg_files=None, reporter=None):
- self.init = init
- self.cfg_files = cfg_files
- # Created on first use
- self._cached_cfg = None
- if reporter is None:
- reporter = events.ReportEventStack(
- name="module-reporter",
- description="module-desc",
- reporting_enabled=False,
- )
- self.reporter = reporter
-
- @property
- def cfg(self):
- # None check to avoid empty case causing re-reading
- if self._cached_cfg is None:
- merger = helpers.ConfigMerger(
- paths=self.init.paths,
- datasource=self.init.datasource,
- additional_fns=self.cfg_files,
- base_cfg=self.init.cfg,
- )
- self._cached_cfg = merger.cfg
- # LOG.debug("Loading 'module' config %s", self._cached_cfg)
- # Only give out a copy so that others can't modify this...
- return copy.deepcopy(self._cached_cfg)
-
- def _read_modules(self, name):
- module_list = []
- if name not in self.cfg:
- return module_list
- cfg_mods = self.cfg.get(name)
- if not cfg_mods:
- return module_list
- # Create 'module_list', an array of hashes
- # Where hash['mod'] = module name
- # hash['freq'] = frequency
- # hash['args'] = arguments
- for item in cfg_mods:
- if not item:
- continue
- if isinstance(item, str):
- module_list.append(
- {
- "mod": item.strip(),
- }
- )
- elif isinstance(item, (list)):
- contents = {}
- # Meant to fall through...
- if len(item) >= 1:
- contents["mod"] = item[0].strip()
- if len(item) >= 2:
- contents["freq"] = item[1].strip()
- if len(item) >= 3:
- contents["args"] = item[2:]
- if contents:
- module_list.append(contents)
- elif isinstance(item, (dict)):
- contents = {}
- valid = False
- if "name" in item:
- contents["mod"] = item["name"].strip()
- valid = True
- if "frequency" in item:
- contents["freq"] = item["frequency"].strip()
- if "args" in item:
- contents["args"] = item["args"] or []
- if contents and valid:
- module_list.append(contents)
- else:
- raise TypeError(
- "Failed to read '%s' item in config, unknown type %s"
- % (item, type_utils.obj_name(item))
- )
- return module_list
-
- def _fixup_modules(self, raw_mods):
- mostly_mods = []
- for raw_mod in raw_mods:
- raw_name = raw_mod["mod"]
- freq = raw_mod.get("freq")
- run_args = raw_mod.get("args") or []
- mod_name = config.form_module_name(raw_name)
- if not mod_name:
- continue
- if freq and freq not in FREQUENCIES:
- LOG.warning(
- "Config specified module %s has an unknown frequency %s",
- raw_name,
- freq,
- )
- # Reset it so when ran it will get set to a known value
- freq = None
- mod_locs, looked_locs = importer.find_module(
- mod_name, ["", type_utils.obj_name(config)], ["handle"]
- )
- if not mod_locs:
- LOG.warning(
- "Could not find module named %s (searched %s)",
- mod_name,
- looked_locs,
- )
- continue
- mod = config.fixup_module(importer.import_module(mod_locs[0]))
- mostly_mods.append([mod, raw_name, freq, run_args])
- return mostly_mods
-
- def _run_modules(self, mostly_mods):
- cc = self.init.cloudify()
- # Return which ones ran
- # and which ones failed + the exception of why it failed
- failures = []
- which_ran = []
- for (mod, name, freq, args) in mostly_mods:
- try:
- # Try the modules frequency, otherwise fallback to a known one
- if not freq:
- freq = mod.frequency
- if freq not in FREQUENCIES:
- freq = PER_INSTANCE
- LOG.debug(
- "Running module %s (%s) with frequency %s", name, mod, freq
- )
-
- # Use the configs logger and not our own
- # TODO(harlowja): possibly check the module
- # for having a LOG attr and just give it back
- # its own logger?
- func_args = [name, self.cfg, cc, config.LOG, args]
- # Mark it as having started running
- which_ran.append(name)
- # This name will affect the semaphore name created
- run_name = "config-%s" % (name)
-
- desc = "running %s with frequency %s" % (run_name, freq)
- myrep = events.ReportEventStack(
- name=run_name, description=desc, parent=self.reporter
- )
-
- with myrep:
- ran, _r = cc.run(
- run_name, mod.handle, func_args, freq=freq
- )
- if ran:
- myrep.message = "%s ran successfully" % run_name
- else:
- myrep.message = "%s previously ran" % run_name
-
- except Exception as e:
- util.logexc(LOG, "Running module %s (%s) failed", name, mod)
- failures.append((name, e))
- return (which_ran, failures)
-
- def run_single(self, mod_name, args=None, freq=None):
- # Form the users module 'specs'
- mod_to_be = {
- "mod": mod_name,
- "args": args,
- "freq": freq,
- }
- # Now resume doing the normal fixups and running
- raw_mods = [mod_to_be]
- mostly_mods = self._fixup_modules(raw_mods)
- return self._run_modules(mostly_mods)
-
- def run_section(self, section_name):
- raw_mods = self._read_modules(section_name)
- mostly_mods = self._fixup_modules(raw_mods)
- d_name = self.init.distro.name
-
- skipped = []
- forced = []
- overridden = self.cfg.get("unverified_modules", [])
- active_mods = []
- all_distros = set([distros.ALL_DISTROS])
- for (mod, name, _freq, _args) in mostly_mods:
- worked_distros = set(mod.distros) # Minimally [] per fixup_modules
- worked_distros.update(
- distros.Distro.expand_osfamily(mod.osfamilies)
- )
-
- # Skip only when the following conditions are all met:
- # - distros are defined in the module != ALL_DISTROS
- # - the current d_name isn't in distros
- # - and the module is unverified and not in the unverified_modules
- # override list
- if worked_distros and worked_distros != all_distros:
- if d_name not in worked_distros:
- if name not in overridden:
- skipped.append(name)
- continue
- forced.append(name)
- active_mods.append([mod, name, _freq, _args])
-
- if skipped:
- LOG.info(
- "Skipping modules '%s' because they are not verified "
- "on distro '%s'. To run anyway, add them to "
- "'unverified_modules' in config.",
- ",".join(skipped),
- d_name,
- )
- if forced:
- LOG.info("running unverified_modules: '%s'", ", ".join(forced))
-
- return self._run_modules(active_mods)
-
-
def read_runtime_config():
return util.read_conf(RUN_CLOUD_CONFIG)
@@ -1158,11 +960,11 @@ def read_runtime_config():
def fetch_base_config():
return util.mergemanydict(
[
- # builtin config
+ # builtin config, hardcoded in settings.py.
util.get_builtin_cfg(),
# Anything in your conf.d or 'default' cloud.cfg location.
util.read_conf_with_confd(CLOUD_CONFIG),
- # runtime config
+ # runtime config. I.e., /run/cloud-init/cloud.cfg
read_runtime_config(),
# Kernel/cmdline parameters override system config
util.read_conf_from_cmdline(),
diff --git a/cloudinit/subp.py b/cloudinit/subp.py
index 7693601d..5acfeccc 100644
--- a/cloudinit/subp.py
+++ b/cloudinit/subp.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Common utility functions for interacting with subprocess."""
+import collections
import logging
import os
import subprocess
@@ -8,6 +9,8 @@ from errno import ENOEXEC
LOG = logging.getLogger(__name__)
+SubpResult = collections.namedtuple("SubpResult", ["stdout", "stderr"])
+
def prepend_base_command(base_command, commands):
"""Ensure user-provided commands start with base_command; warn otherwise.
@@ -211,9 +214,9 @@ def subp(
if not capturing, return is (None, None)
if capturing, stdout and stderr are returned.
if decode:
- entries in tuple will be python2 unicode or python3 string
+ entries in tuple will be string
if not decode:
- entries in tuple will be python2 string or python3 bytes
+ entries in tuple will be bytes
"""
# not supported in cloud-init (yet), for now kept in the call signature
@@ -334,7 +337,7 @@ def subp(
)
if status_cb:
status_cb("End run command: exit({code})\n".format(code=rc))
- return (out, err)
+ return SubpResult(out, err)
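Because SubpResult is a namedtuple, existing two-value unpacking keeps working while new call sites can use attribute access; a minimal sketch:

    from cloudinit import subp

    result = subp.subp(["echo", "hello"])
    print(result.stdout.strip())              # attribute access
    out, err = subp.subp(["echo", "hello"])   # legacy tuple unpacking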
def target_path(target, path=None):
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 1e147d4a..298eaf6b 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -15,13 +15,6 @@ import re
import sys
try:
- from Cheetah.Template import Template as CTemplate
-
- CHEETAH_AVAILABLE = True
-except (ImportError, AttributeError):
- CHEETAH_AVAILABLE = False
-
-try:
from jinja2 import DebugUndefined as JUndefined
from jinja2 import Template as JTemplate
@@ -96,15 +89,15 @@ def basic_render(content, params):
def detect_template(text):
- def cheetah_render(content, params):
- return CTemplate(content, searchList=[params]).respond()
-
def jinja_render(content, params):
# keep_trailing_newline is in jinja2 2.7+, not 2.6
add = "\n" if content.endswith("\n") else ""
return (
JTemplate(
- content, undefined=UndefinedJinjaVariable, trim_blocks=True
+ content,
+ undefined=UndefinedJinjaVariable,
+ trim_blocks=True,
+ extensions=["jinja2.ext.do"],
).render(**params)
+ add
)
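The newly enabled jinja2.ext.do extension permits `{% do %}` expression-statements in user templates (LP: #1962759). A standalone sketch of the behavior it adds, using jinja2 directly (cloud-init's other renderer options are omitted here):

    from jinja2 import Template

    t = Template(
        "{% do items.append(3) %}{{ items }}",
        extensions=["jinja2.ext.do"],
    )
    print(t.render(items=[1, 2]))  # prints: [1, 2, 3]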
@@ -116,14 +109,10 @@ def detect_template(text):
rest = ""
type_match = TYPE_MATCHER.match(ident)
if not type_match:
- if CHEETAH_AVAILABLE:
- LOG.debug("Using Cheetah as the renderer for unknown template.")
- return ("cheetah", cheetah_render, text)
- else:
- return ("basic", basic_render, text)
+ return ("basic", basic_render, text)
else:
template_type = type_match.group(1).lower().strip()
- if template_type not in ("jinja", "cheetah", "basic"):
+ if template_type not in ("jinja", "basic"):
raise ValueError(
"Unknown template rendering type '%s' requested"
% template_type
@@ -136,14 +125,6 @@ def detect_template(text):
return ("basic", basic_render, rest)
elif template_type == "jinja" and JINJA_AVAILABLE:
return ("jinja", jinja_render, rest)
- if template_type == "cheetah" and not CHEETAH_AVAILABLE:
- LOG.warning(
- "Cheetah not available as the selected renderer for"
- " desired template, reverting to the basic renderer."
- )
- return ("basic", basic_render, rest)
- elif template_type == "cheetah" and CHEETAH_AVAILABLE:
- return ("cheetah", cheetah_render, rest)
# Only thing left over is the basic renderer (it is always available).
return ("basic", basic_render, rest)
@@ -151,12 +132,7 @@ def detect_template(text):
def render_from_file(fn, params):
if not params:
params = {}
- # jinja in python2 uses unicode internally. All py2 str will be decoded.
- # If it is given a str that has non-ascii then it will raise a
- # UnicodeDecodeError. So we explicitly convert to unicode type here.
- template_type, renderer, content = detect_template(
- util.load_file(fn, decode=False).decode("utf-8")
- )
+ template_type, renderer, content = detect_template(util.load_file(fn))
LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type)
return renderer(content, params)
@@ -167,15 +143,13 @@ def render_to_file(fn, outfn, params, mode=0o644):
def render_string_to_file(content, outfn, params, mode=0o644):
- """Render string (or py2 unicode) to file.
- Warning: py2 str with non-ascii chars will cause UnicodeDecodeError."""
+ """Render string"""
contents = render_string(content, params)
util.write_file(outfn, contents, mode=mode)
def render_string(content, params):
- """Render string (or py2 unicode).
- Warning: py2 str with non-ascii chars will cause UnicodeDecodeError."""
+ """Render string"""
if not params:
params = {}
_template_type, renderer, content = detect_template(content)
@@ -193,6 +167,3 @@ def render_cloudcfg(variant, template, output):
sys.stdout.write(contents)
else:
write_file(output, contents, omode="w")
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index c577e8da..04643895 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -11,12 +11,15 @@
import copy
import json
import os
+import threading
import time
+from concurrent.futures import ThreadPoolExecutor, TimeoutError, as_completed
from email.utils import parsedate
from errno import ENOENT
from functools import partial
from http.client import NOT_FOUND
from itertools import count
+from typing import Any, Callable, List, Tuple
from urllib.parse import quote, urlparse, urlunparse
import requests
@@ -187,14 +190,15 @@ def readurl(
session=None,
infinite=False,
log_req_resp=True,
- request_method=None,
+ request_method="",
) -> UrlResponse:
"""Wrapper around requests.Session to read the url and retry if necessary
:param url: Mandatory url to request.
:param data: Optional form data to post the URL. Will set request_method
to 'POST' if present.
- :param timeout: Timeout in seconds to wait for a response
+ :param timeout: Timeout in seconds to wait for a response. May be a tuple
+ if specifying (connection timeout, read timeout).
:param retries: Number of times to retry on exception if exception_cb is
None or exception_cb returns True for the exception caught. Default is
to fail with 0 retries on exception.
@@ -229,7 +233,10 @@ def readurl(
request_method = "POST" if data else "GET"
req_args["method"] = request_method
if timeout is not None:
- req_args["timeout"] = max(float(timeout), 0)
+ if isinstance(timeout, tuple):
+ req_args["timeout"] = timeout
+ else:
+ req_args["timeout"] = max(float(timeout), 0)
if headers_redact is None:
headers_redact = []
manual_tries = 1
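A tuple timeout is handed straight to requests, which treats it as (connect timeout, read timeout); an illustrative call (the endpoint is just an example):

    from cloudinit.url_helper import readurl

    # allow 5s to establish the connection and 30s to read the body
    response = readurl(
        "http://169.254.169.254/latest/meta-data/",
        timeout=(5.0, 30.0),
        retries=2,
    )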
@@ -343,17 +350,120 @@ def readurl(
raise excps[-1]
+def _run_func_with_delay(
+ func: Callable[..., Any],
+ addr: str,
+ timeout: int,
+ event: threading.Event,
+ delay: float = None,
+) -> Any:
+ """Execute func with optional delay"""
+ if delay:
+
+ # event.wait() returns True iff the flag is set, meaning another
+ # thread has already completed successfully; there is no need to
+ # try again, so exit early
+ if event.wait(timeout=delay):
+ return
+ return func(addr, timeout)
+
+
+def dual_stack(
+ func: Callable[..., Any],
+ addresses: List[str],
+ stagger_delay: float = 0.150,
+ timeout: int = 10,
+) -> Tuple:
+ """execute multiple callbacks in parallel
+
+ Run blocking func against two different addresses staggered with a
+ delay. The first call to return successfully is returned from this
+ function and remaining unfinished calls are cancelled if they have not
+ yet started
+ """
+ return_result = None
+ returned_address = None
+ last_exception = None
+ exceptions = []
+ is_done = threading.Event()
+
+ # future work: add cancel_futures to the Python stdlib
+ # ThreadPoolExecutor context manager implementation
+ #
+ # for now we don't use this feature since it is only available on
+ # Python > 3.8, isn't exposed via the context manager, and offers
+ # only marginal benefit
+ executor = ThreadPoolExecutor(max_workers=len(addresses))
+ try:
+ futures = {
+ executor.submit(
+ _run_func_with_delay,
+ func=func,
+ addr=addr,
+ timeout=timeout,
+ event=is_done,
+ delay=(i * stagger_delay),
+ ): addr
+ for i, addr in enumerate(addresses)
+ }
+
+ # handle returned requests in order of completion
+ for future in as_completed(futures, timeout=timeout):
+
+ returned_address = futures[future]
+ return_exception = future.exception()
+ if return_exception:
+ last_exception = return_exception
+ exceptions.append(last_exception)
+ else:
+ return_result = future.result()
+ if return_result:
+
+ # communicate to other threads that they do not need to
+ # try: this thread has already succeeded
+ is_done.set()
+ return (returned_address, return_result)
+
+ # No success, return the last exception but log them all for
+ # debugging
+ if last_exception:
+ LOG.warning(
+ "Exception(s) %s during request to "
+ "%s, raising last exception",
+ exceptions,
+ returned_address,
+ )
+ raise last_exception
+ else:
+ LOG.error("Empty result for address %s", returned_address)
+ raise ValueError("No result returned")
+
+ # when max_wait expires, log but don't throw (retries happen)
+ except TimeoutError:
+ LOG.warning(
+ "Timed out waiting for addresses: %s, "
+ "exception(s) raised while waiting: %s",
+ " ".join(addresses),
+ " ".join(exceptions),
+ )
+ finally:
+ executor.shutdown(wait=False)
+
+ return (returned_address, return_result)
+
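A hedged usage sketch of dual_stack(): race an IPv6 and an IPv4 endpoint (the addresses here are illustrative), staggering the second request in the spirit of RFC 6555 so a healthy first address wins without waiting out the other:

    from cloudinit.url_helper import dual_stack, readurl

    address, response = dual_stack(
        lambda addr, timeout: readurl(addr, timeout=timeout),
        ["http://[fd00:ec2::254]/", "http://169.254.169.254/"],
        stagger_delay=0.150,
        timeout=10,
    )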
+
def wait_for_url(
urls,
max_wait=None,
timeout=None,
- status_cb=None,
- headers_cb=None,
+ status_cb: Callable = LOG.debug, # some sources use different log levels
+ headers_cb: Callable = None,
headers_redact=None,
- sleep_time=1,
- exception_cb=None,
- sleep_time_cb=None,
- request_method=None,
+ sleep_time: int = 1,
+ exception_cb: Callable = None,
+ sleep_time_cb: Callable = None,
+ request_method: str = "",
+ connect_synchronously: bool = True,
+ async_delay: float = 0.150,
):
"""
urls: a list of urls to try
@@ -371,6 +481,8 @@ def wait_for_url(
sleep_time_cb: call method with 2 arguments (response, loop_n) that
generates the next sleep time.
request_method: indicate the type of HTTP request, GET, PUT, or POST
+ connect_synchronously: if false, enables executing requests in parallel
+ async_delay: delay before parallel metadata requests, see RFC 6555
returns: tuple of (url, response contents), on failure, (False, None)
the idea of this routine is to wait for the EC2 metadata service to
@@ -390,31 +502,94 @@ def wait_for_url(
A value of None for max_wait will retry indefinitely.
"""
- start_time = time.time()
- def log_status_cb(msg, exc=None):
- LOG.debug(msg)
-
- if status_cb is None:
- status_cb = log_status_cb
+ def default_sleep_time(_, loop_number: int):
+ return int(loop_number / 5) + 1
def timeup(max_wait, start_time):
+ """Check if time is up based on start time and max wait"""
if max_wait is None:
return False
return (max_wait <= 0) or (time.time() - start_time > max_wait)
- loop_n = 0
- response = None
- while True:
- if sleep_time_cb is not None:
- sleep_time = sleep_time_cb(response, loop_n)
+ def handle_url_response(response, url):
+ """Map requests response code/contents to internal "UrlError" type"""
+ if not response.contents:
+ reason = "empty response [%s]" % (response.code)
+ url_exc = UrlError(
+ ValueError(reason),
+ code=response.code,
+ headers=response.headers,
+ url=url,
+ )
+ elif not response.ok():
+ reason = "bad status code [%s]" % (response.code)
+ url_exc = UrlError(
+ ValueError(reason),
+ code=response.code,
+ headers=response.headers,
+ url=url,
+ )
else:
- sleep_time = int(loop_n / 5) + 1
+ reason = ""
+ url_exc = None
+ return (url_exc, reason)
+
+ def read_url_handle_exceptions(
+ url_reader_cb, urls, start_time, exc_cb, log_cb
+ ):
+ """Execute request, handle response, optionally log exception"""
+ reason = ""
+ url = None
+ try:
+ url, response = url_reader_cb(urls)
+ url_exc, reason = handle_url_response(response, url)
+ if not url_exc:
+ return (url, response)
+ except UrlError as e:
+ reason = "request error [%s]" % e
+ url_exc = e
+ except Exception as e:
+ reason = "unexpected error [%s]" % e
+ url_exc = e
+ time_taken = int(time.time() - start_time)
+ max_wait_str = "%ss" % max_wait if max_wait else "unlimited"
+ status_msg = "Calling '%s' failed [%s/%s]: %s" % (
+ url,
+ time_taken,
+ max_wait_str,
+ reason,
+ )
+ log_cb(status_msg)
+ if exc_cb:
+ # This can be used to alter the headers that will be sent
+ # in the future, for example this is what the MAAS datasource
+ # does.
+ exc_cb(msg=status_msg, exception=url_exc)
+
+ def read_url_cb(url, timeout):
+ return readurl(
+ url,
+ headers={} if headers_cb is None else headers_cb(url),
+ headers_redact=headers_redact,
+ timeout=timeout,
+ check_status=False,
+ request_method=request_method,
+ )
+
+ def read_url_serial(start_time, timeout, exc_cb, log_cb):
+ """iterate over list of urls, request each one and handle responses
+ and thrown exceptions individually per url
+ """
+
+ def url_reader_serial(url):
+ return (url, read_url_cb(url, timeout))
+
for url in urls:
now = time.time()
if loop_n != 0:
if timeup(max_wait, start_time):
- break
+ return
if (
max_wait is not None
and timeout
@@ -423,61 +598,52 @@ def wait_for_url(
# shorten timeout to not run way over max_time
timeout = int((start_time + max_wait) - now)
- reason = ""
- url_exc = None
- try:
- if headers_cb is not None:
- headers = headers_cb(url)
- else:
- headers = {}
-
- response = readurl(
- url,
- headers=headers,
- headers_redact=headers_redact,
- timeout=timeout,
- check_status=False,
- request_method=request_method,
- )
- if not response.contents:
- reason = "empty response [%s]" % (response.code)
- url_exc = UrlError(
- ValueError(reason),
- code=response.code,
- headers=response.headers,
- url=url,
- )
- elif not response.ok():
- reason = "bad status code [%s]" % (response.code)
- url_exc = UrlError(
- ValueError(reason),
- code=response.code,
- headers=response.headers,
- url=url,
- )
- else:
- return url, response.contents
- except UrlError as e:
- reason = "request error [%s]" % e
- url_exc = e
- except Exception as e:
- reason = "unexpected error [%s]" % e
- url_exc = e
-
- time_taken = int(time.time() - start_time)
- max_wait_str = "%ss" % max_wait if max_wait else "unlimited"
- status_msg = "Calling '%s' failed [%s/%s]: %s" % (
- url,
- time_taken,
- max_wait_str,
- reason,
+ out = read_url_handle_exceptions(
+ url_reader_serial, url, start_time, exc_cb, log_cb
)
- status_cb(status_msg)
- if exception_cb:
- # This can be used to alter the headers that will be sent
- # in the future, for example this is what the MAAS datasource
- # does.
- exception_cb(msg=status_msg, exception=url_exc)
+ if out:
+ return out
+
+ def read_url_parallel(start_time, timeout, exc_cb, log_cb):
+ """pass list of urls to dual_stack which sends requests in parallel
+ handle response and exceptions of the first endpoint to respond
+ """
+ url_reader_parallel = partial(
+ dual_stack,
+ read_url_cb,
+ stagger_delay=async_delay,
+ timeout=timeout,
+ )
+ out = read_url_handle_exceptions(
+ url_reader_parallel, urls, start_time, exc_cb, log_cb
+ )
+ if out:
+ return out
+
+ start_time = time.time()
+
+ # Dual-stack support factored the serial and parallel execution paths
+ # out of the retry loop so that the loop logic exists separately from
+ # the http calls. Serial execution should be fundamentally the same as
+ # before, but with a layer of indirection so that the parallel
+ # dual-stack path may reuse the same max-timeout logic.
+ do_read_url = (
+ read_url_serial if connect_synchronously else read_url_parallel
+ )
+
+ calculate_sleep_time = (
+ default_sleep_time if not sleep_time_cb else sleep_time_cb
+ )
+
+ loop_n: int = 0
+ response = None
+ while True:
+ sleep_time = calculate_sleep_time(response, loop_n)
+
+ url = do_read_url(start_time, timeout, exception_cb, status_cb)
+ if url:
+ address, response = url
+ return (address, response.contents)
if timeup(max_wait, start_time):
break
@@ -488,6 +654,11 @@ def wait_for_url(
)
time.sleep(sleep_time)
+ # shorten timeout to not run way over max_time
+ # timeout=0.0 causes exceptions in urllib, set to None if zero
+ timeout = int((start_time + max_wait) - time.time()) or None
+
+ LOG.error("Timed out, no response from urls: %s", urls)
return False, None
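Putting the refactor together, a datasource can opt into the parallel path; a sketch with illustrative IMDS endpoints:

    from cloudinit.url_helper import wait_for_url

    url, contents = wait_for_url(
        urls=["http://[fd00:ec2::254]/", "http://169.254.169.254/"],
        max_wait=120,
        timeout=10,
        connect_synchronously=False,  # route requests through dual_stack()
        async_delay=0.150,
    )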
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 1fadd196..2639478a 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -32,7 +32,8 @@ import subprocess
import sys
import time
from base64 import b64decode, b64encode
-from errno import ENOENT
+from collections import deque
+from errno import EACCES, ENOENT
from functools import lru_cache
from typing import List
from urllib import parse
@@ -74,10 +75,10 @@ def get_dpkg_architecture(target=None):
N.B. This function is wrapped in functools.lru_cache, so repeated calls
won't shell out every time.
"""
- out, _ = subp.subp(
+ out = subp.subp(
["dpkg", "--print-architecture"], capture=True, target=target
)
- return out.strip()
+ return out.stdout.strip()
@lru_cache()
@@ -91,10 +92,8 @@ def lsb_release(target=None):
data = {}
try:
- out, _ = subp.subp(
- ["lsb_release", "--all"], capture=True, target=target
- )
- for line in out.splitlines():
+ out = subp.subp(["lsb_release", "--all"], capture=True, target=target)
+ for line in out.stdout.splitlines():
fname, _, val = line.partition(":")
if fname in fmap:
data[fmap[fname]] = val.strip()
@@ -381,6 +380,12 @@ def find_modules(root_dir) -> dict:
return entries
+def write_to_console(conpath, text):
+ with open(conpath, "w") as wfh:
+ wfh.write(text)
+ wfh.flush()
+
+
def multi_log(
text,
console=True,
@@ -393,16 +398,27 @@ def multi_log(
sys.stderr.write(text)
if console:
conpath = "/dev/console"
+ writing_to_console_worked = False
if os.path.exists(conpath):
- with open(conpath, "w") as wfh:
- wfh.write(text)
- wfh.flush()
- elif fallback_to_stdout:
- # A container may lack /dev/console (arguably a container bug). If
- # it does not exist, then write output to stdout. this will result
- # in duplicate stderr and stdout messages if stderr was True.
+ try:
+ write_to_console(conpath, text)
+ writing_to_console_worked = True
+ except OSError:
+ console_error = "Failed to write to /dev/console"
+ sys.stdout.write(f"{console_error}\n")
+ if log:
+ log.log(logging.WARNING, console_error)
+
+ if fallback_to_stdout and not writing_to_console_worked:
+ # A container may lack /dev/console (arguably a container bug).
+ # Additionally, /dev/console may not be writable on a VM (again
+ # likely a VM bug or virtualization bug).
+ #
+ # If either of these is the case, then write output to stdout.
+ # This will result in duplicate stderr and stdout messages if
+ # stderr was True.
#
- # even though upstart or systemd might have set up output to go to
+ # even though systemd might have set up output to go to
# /dev/console, the user may have configured elsewhere via
# cloud-config 'output'. If there is /dev/console, messages will
# still get there.
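The net effect is that a write failure on an existing /dev/console no longer propagates; a minimal sketch of the call (fallback_to_stdout shown explicitly):

    from cloudinit import util

    # lands on /dev/console when writable, otherwise falls back to stdout
    util.multi_log("booting...\n", console=True, fallback_to_stdout=True)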
@@ -806,10 +822,10 @@ def make_url(
return parse.urlunparse(pieces)
-def mergemanydict(srcs, reverse=False):
+def mergemanydict(srcs, reverse=False) -> dict:
if reverse:
srcs = reversed(srcs)
- merged_cfg = {}
+ merged_cfg: dict = {}
for cfg in srcs:
if cfg:
# Figure out which mergers to apply...
@@ -983,6 +999,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
def read_conf_d(confd):
+ """Read configuration directory."""
# Get reverse sorted list (later names trump earlier ones)
confs = sorted(os.listdir(confd), reverse=True)
@@ -995,13 +1012,27 @@ def read_conf_d(confd):
# Load them all so that they can be merged
cfgs = []
for fn in confs:
- cfgs.append(read_conf(os.path.join(confd, fn)))
+ try:
+ cfgs.append(read_conf(os.path.join(confd, fn)))
+ except OSError as e:
+ if e.errno == EACCES:
+ LOG.warning(
+ "REDACTED config part %s/%s for non-root user", confd, fn
+ )
return mergemanydict(cfgs)
def read_conf_with_confd(cfgfile):
- cfg = read_conf(cfgfile)
+ cfgs = deque()
+ cfg: dict = {}
+ try:
+ cfg = read_conf(cfgfile)
+ except OSError as e:
+ if e.errno == EACCES:
+ LOG.warning("REDACTED config part %s for non-root user", cfgfile)
+ else:
+ cfgs.append(cfg)
confd = False
if "conf_d" in cfg:
@@ -1017,12 +1048,12 @@ def read_conf_with_confd(cfgfile):
elif os.path.isdir("%s.d" % cfgfile):
confd = "%s.d" % cfgfile
- if not confd or not os.path.isdir(confd):
- return cfg
+ if confd and os.path.isdir(confd):
+ # Conf.d settings override input configuration
+ confd_cfg = read_conf_d(confd)
+ cfgs.appendleft(confd_cfg)
- # Conf.d settings override input configuration
- confd_cfg = read_conf_d(confd)
- return mergemanydict([confd_cfg, cfg])
+ return mergemanydict(cfgs)
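The deque keeps the precedence explicit: conf.d fragments are appendleft-ed ahead of the base file, and mergemanydict() is first-wins, so conf.d settings override the input configuration. A sketch of the call:

    from cloudinit import util

    # merges /etc/cloud/cloud.cfg with /etc/cloud/cloud.cfg.d/*.cfg;
    # unreadable parts are skipped with a REDACTED warning for non-root
    merged = util.read_conf_with_confd("/etc/cloud/cloud.cfg")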
def read_conf_from_cmdline(cmdline=None):
@@ -1276,8 +1307,8 @@ def find_devs_with_netbsd(
label = criteria.lstrip("LABEL=")
if criteria.startswith("TYPE="):
_type = criteria.lstrip("TYPE=")
- out, _err = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0])
- for dev in out.split():
+ out = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0])
+ for dev in out.stdout.split():
if label or _type:
mscdlabel_out, _ = subp.subp(["mscdlabel", dev], rcs=[0, 1])
if label and not ('label "%s"' % label) in mscdlabel_out:
@@ -1293,9 +1324,9 @@ def find_devs_with_netbsd(
def find_devs_with_openbsd(
criteria=None, oformat="device", tag=None, no_cache=False, path=None
):
- out, _err = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0])
+ out = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0])
devlist = []
- for entry in out.rstrip().split(","):
+ for entry in out.stdout.rstrip().split(","):
if not entry.endswith(":"):
# ffs partition with a serial, not a config-drive
continue
@@ -1310,10 +1341,10 @@ def find_devs_with_openbsd(
def find_devs_with_dragonflybsd(
criteria=None, oformat="device", tag=None, no_cache=False, path=None
):
- out, _err = subp.subp(["sysctl", "-n", "kern.disks"], rcs=[0])
+ out = subp.subp(["sysctl", "-n", "kern.disks"], rcs=[0])
devlist = [
i
- for i in sorted(out.split(), reverse=True)
+ for i in sorted(out.stdout.split(), reverse=True)
if not i.startswith("md") and not i.startswith("vn")
]
@@ -1417,9 +1448,9 @@ def blkid(devs=None, disable_cache=False):
# we have to decode with 'replace' as shelx.split (called by
# load_shell_content) can't take bytes. So this is potentially
# lossy of non-utf-8 chars in blkid output.
- out, _ = subp.subp(cmd, capture=True, decode="replace")
+ out = subp.subp(cmd, capture=True, decode="replace")
ret = {}
- for line in out.splitlines():
+ for line in out.stdout.splitlines():
dev, _, data = line.partition(":")
ret[dev] = load_shell_content(data)
ret[dev]["DEVNAME"] = dev
@@ -1754,8 +1785,8 @@ def mounts():
mount_locs = load_file("/proc/mounts").splitlines()
method = "proc"
else:
- (mountoutput, _err) = subp.subp("mount")
- mount_locs = mountoutput.splitlines()
+ out = subp.subp("mount")
+ mount_locs = out.stdout.splitlines()
method = "mount"
mountre = r"^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$"
for mpline in mount_locs:
@@ -1887,7 +1918,12 @@ def is_link(path):
def sym_link(source, link, force=False):
LOG.debug("Creating symbolic link from %r => %r", link, source)
if force and os.path.lexists(link):
- del_file(link)
+ # Provide atomic update of symlink to avoid races with status --wait
+ # LP: #1962150
+ tmp_link = os.path.join(os.path.dirname(link), "tmp" + rand_str(8))
+ os.symlink(source, tmp_link)
+ os.replace(tmp_link, link)
+ return
os.symlink(source, link)
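The pattern relies on rename(2) being atomic: the new link is created under a temporary name and then swapped over the old one, so a concurrent `cloud-init status --wait` never observes a missing link. A standalone sketch (the helper and temp-name choice are illustrative; the diff uses rand_str()):

    import os

    def atomic_symlink(source: str, link: str) -> None:
        tmp = link + ".tmp"        # illustrative temp name
        os.symlink(source, tmp)    # create the replacement first
        os.replace(tmp, link)      # atomic swap: readers see old or new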
@@ -2061,7 +2097,7 @@ def write_file(
omode="wb",
preserve_mode=False,
*,
- ensure_dir_exists=True
+ ensure_dir_exists=True,
):
"""
Writes a file with the given content and sets the file mode as specified.
@@ -2200,10 +2236,6 @@ def _is_container_systemd():
return _cmd_exits_zero(["systemd-detect-virt", "--quiet", "--container"])
-def _is_container_upstart():
- return _cmd_exits_zero(["running-in-container"])
-
-
def _is_container_old_lxc():
return _cmd_exits_zero(["lxc-is-container"])
@@ -2226,7 +2258,6 @@ def is_container():
checks = (
_is_container_systemd,
_is_container_freebsd,
- _is_container_upstart,
_is_container_old_lxc,
)
@@ -2664,7 +2695,7 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
def read_meminfo(meminfo="/proc/meminfo", raw=False):
# read a /proc/meminfo style file and return
# a dict with 'total', 'free', and 'available'
- mpliers = {"kB": 2 ** 10, "mB": 2 ** 20, "B": 1, "gB": 2 ** 30}
+ mpliers = {"kB": 2**10, "mB": 2**20, "B": 1, "gB": 2**30}
kmap = {
"MemTotal:": "total",
"MemFree:": "free",
@@ -2694,7 +2725,7 @@ def human2bytes(size):
if size.endswith("B"):
size = size[:-1]
- mpliers = {"B": 1, "K": 2 ** 10, "M": 2 ** 20, "G": 2 ** 30, "T": 2 ** 40}
+ mpliers = {"B": 1, "K": 2**10, "M": 2**20, "G": 2**30, "T": 2**40}
num = size
mplier = "B"
@@ -2731,10 +2762,10 @@ def message_from_string(string):
def get_installed_packages(target=None):
- (out, _) = subp.subp(["dpkg-query", "--list"], target=target, capture=True)
+ out = subp.subp(["dpkg-query", "--list"], target=target, capture=True)
pkgs_inst = set()
- for line in out.splitlines():
+ for line in out.stdout.splitlines():
try:
(state, pkg, _) = line.split(None, 2)
except ValueError:
diff --git a/cloudinit/version.py b/cloudinit/version.py
index fa51cb9e..061ea419 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "22.1"
+__VERSION__ = "22.2"
_PACKAGED_VERSION = "@@PACKAGED_VERSION@@"
FEATURES = [
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index fb4b456c..6951a0e3 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -34,7 +34,11 @@ disable_root: true
{% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux",
"fedora", "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
+{% if variant == "rhel" %}
+mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2']
+{% else %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
+{% endif %}
{% if variant == "amazon" %}
resize_rootfs: noblock
{% endif %}
@@ -66,6 +70,14 @@ network:
config: disabled
{% endif %}
+{% if variant == "rhel" %}
+# Default redhat settings:
+ssh_deletekeys: true
+ssh_genkeytypes: ['rsa', 'ecdsa', 'ed25519']
+syslog_fix_perms: ~
+disable_vmware_customization: false
+{% endif %}
+
# The modules that run in the 'init' stage
cloud_init_modules:
- migrator
@@ -100,17 +112,19 @@ cloud_init_modules:
# The modules that run in the 'config' stage
cloud_config_modules:
{% if variant in ["ubuntu", "unknown", "debian"] %}
-# Emit the cloud config ready event
-# this can be used by upstart jobs for 'start on cloud-config'.
- - emit_upstart
- snap
{% endif %}
{% if variant not in ["photon"] %}
- ssh-import-id
+{% if variant not in ["rhel"] %}
- keyboard
+{% endif %}
- locale
{% endif %}
- set-passwords
+{% if variant in ["rhel"] %}
+ - rh_subscription
+{% endif %}
{% if variant in ["rhel", "fedora", "photon"] %}
{% if variant not in ["photon"] %}
- spacewalk
@@ -204,7 +218,6 @@ system_info:
paths:
cloud_dir: /var/lib/cloud/
templates_dir: /etc/cloud/templates/
- upstart_dir: /etc/init/
package_mirrors:
- arches: [i386, amd64]
failsafe:
@@ -239,6 +252,10 @@ system_info:
name: ec2-user
lock_passwd: True
gecos: EC2 Default User
+{% elif variant == "rhel" %}
+ name: cloud-user
+ lock_passwd: true
+ gecos: Cloud User
{% else %}
name: {{ variant }}
lock_passwd: True
@@ -254,6 +271,8 @@ system_info:
groups: [adm, sudo]
{% elif variant == "arch" %}
groups: [wheel, users]
+{% elif variant == "rhel" %}
+ groups: [adm, systemd-journal]
{% else %}
groups: [wheel, adm, systemd-journal]
{% endif %}
diff --git a/debian/changelog b/debian/changelog
index 5e59bdab..d3c97cb4 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,188 @@
+cloud-init (22.2-0ubuntu1~22.10.1) kinetic; urgency=medium
+
+ * d/control:
+ - Build-Depends: add python3-responses and python3-pytest-mock for unittests
+ - Suggests: add openssh-server and ssh-import-id
+ * d/cloud-init.postinst:
+ - remove deprecated emit_upstart from cloud.cfg on upgrade
+ * New upstream release.
+ - Release 22.2 (LP: #1960939)
+ - Fix test due to caplog incompatibility (#1461) [Alberto Contreras]
+ - Align rhel custom files with upstream (#1431)
+ [Emanuele Giuseppe Esposito]
+ - cc_write_files: Improve schema. (#1460) [Alberto Contreras]
+ - cli: Redact files with permission errors in commands (#1440)
+ [Alberto Contreras] (LP: #1953430)
+ - Improve cc_set_passwords. (#1456) [Alberto Contreras]
+ - testing: make fake cloud-init wait actually wait (#1459)
+ - Scaleway: Fix network configuration for netplan 0.102 and later (#1455)
+ [Maxime Corbin]
+ - Fix 'ephmeral' typos in disk names (#1452) [Mike Hucka]
+ - schema: version schema-cloud-config-v1.json (#1424)
+ - cc_modules: set default meta frequency value when no config available
+ (#1457)
+ - Log generic warning on non-systemd systems. (#1450) [Alberto Contreras]
+ - cc_snap.maybe_install_squashfuse no longer needed in Bionic++. (#1448)
+ [Alberto Contreras]
+ - Drop support of *-sk keys in cc_ssh (#1451) [Alberto Contreras]
+ - testing: Fix console_log tests (#1437)
+ - tests: cc_set_passoword update for systemd, non-systemd distros (#1449)
+ - Fix bug in url_helper/dual_stack() logging (#1426)
+ - schema: render schema paths from _CustomSafeLoaderWithMarks (#1391)
+ - testing: Make integration tests kinetic friendly (#1441)
+ - Handle error if SSH service no present. (#1422)
+ [Alberto Contreras] (LP: #1969526)
+ - Fix network-manager activator availability and order (#1438)
+ - sources/azure: remove reprovisioning marker (#1414) [Chris Patterson]
+ - upstart: drop vestigial support for upstart (#1421)
+ - testing: Ensure NoCloud detected in test (#1439)
+ - Update .github-cla-signers kallioli [Kevin Allioli]
+ - Consistently strip top-level network key (#1417) (LP: #1906187)
+ - testing: Fix LXD VM metadata test (#1430)
+ - testing: Add NoCloud setup for NoCloud test (#1425)
+ - Update linters and adapt code for compatibility (#1434) [Paride Legovini]
+ - run-container: add support for LXD VMs (#1428) [Paride Legovini]
+ - integration-reqs: bump pycloudlib pinned commit (#1427) [Paride Legovini]
+ - Fix NoCloud docs (#1423)
+ - Docs fixes (#1406)
+ - docs: Add docs for module creation (#1415)
+ - Remove cheetah from templater (#1416)
+ - tests: verify_ordered_items fallback to re.escape if needed (#1420)
+ - Misc module cleanup (#1418)
+ - docs: Fix doc warnings and enable errors (#1419)
+ [Alberto Contreras] (LP: #1876341)
+ - Refactor cloudinit.sources.NetworkConfigSource to enum (#1413)
+ [Alberto Contreras] (LP: #1874875)
+ - Don't fail if IB and Ethernet devices 'collide' (#1411)
+ - Use cc_* module meta definition over hardcoded vars (SC-888) (#1385)
+ - Fix cc_rsyslog.py initialization (#1404) [Alberto Contreras]
+ - Promote cloud-init schema from devel to top level subcommand (#1402)
+ - mypy: disable missing imports warning for httpretty (#1412)
+ [Chris Patterson]
+ - users: error when home should not be created AND ssh keys provided
+ [Jeffrey 'jf' Lim]
+ - Allow growpart to resize encrypted partitions (#1316)
+ - Fix typo in integration_test.rst (#1405) [Alberto Contreras]
+ - cloudinit.net refactor: apply_network_config_names (#1388)
+ [Alberto Contreras] (LP: #1884602)
+ - tests/azure: add fixtures for hardcoded paths (markers and data_dir)
+ (#1399) [Chris Patterson]
+ - testing: Add responses workaround for focal/impish (#1403)
+ - cc_ssh_import_id: fix is_key_in_nested_dict to avoid early False
+ - Fix ds-identify not detecting NoCloud seed in config (#1381)
+ (LP: #1876375)
+ - sources/azure: retry dhcp for failed processes (#1401) [Chris Patterson]
+ - Move notes about refactorization out of CONTRIBUTING.rst (#1389)
+ - Shave ~8ms off generator runtime (#1387)
+ - Fix provisioning dhcp timeout to 20 minutes (#1394) [Chris Patterson]
+ - schema: module example strict testing fix seed_random
+ - cc_set_hostname: examples small typo (perserve vs preserve)
+ [Wouter Schoot]
+ - sources/azure: refactor http_with_retries to remove **kwargs (#1392)
+ [Chris Patterson]
+ - declare dependency on ssh-import-id (#1334)
+ - drop references to old dependencies and old centos script
+ - sources/azure: only wait for primary nic to be attached during restore
+ (#1378) [Anh Vo]
+ - cc_ntp: migrated legacy schema to cloud-init-schema.json (#1384)
+ - Network functions refactor and bugfixes (#1383)
+ - schema: add JSON defs for modules cc_users_groups (#1379) (LP: #1858930)
+ - Fix doc typo (#1382) [Alberto Contreras]
+ - Add support for dual stack IPv6/IPv4 IMDS to Ec2 (#1160)
+ - Fix KeyError when rendering sysconfig IPv6 routes (#1380) (LP: #1958506)
+ - Return a namedtuple from subp() (#1376)
+ - Mypy stubs and other tox maintenance (SC-920) (#1374)
+ - Distro Compatibility Fixes (#1375)
+ - Pull in Gentoo patches (#1372)
+ - schema: add json defs for modules U-Z (#1360)
+ (LP: #1858928, #1858929, #1858931, #1858932)
+ - util: atomically update sym links to suppress FileNotFoundError
+ when reading status (#1298) [Adam Collard] (LP: #1962150)
+ - schema: add json defs for modules scripts-timezone (SC-801) (#1365)
+ - docs: Add first tutorial (SC-900) (#1368)
+ - BUG 1473527: module ssh-authkey-fingerprints fails Input/output error…
+ (#1340) [Andrew Lee] (LP: #1473527)
+ - add arch hosts template (#1371)
+ - ds-identify: detect LXD for VMs launched from host with > 5.10 kernel
+ (#1370) (LP: #1968085)
+ - Support EC2 tags in instance metadata (#1309) [Eduardo Dobay]
+ - schema: add json defs for modules e-install (SC-651) (#1366)
+ - Improve "(no_create_home|system): true" test (#1367) [Jeffrey 'jf' Lim]
+ - Expose https_proxy env variable to ssh-import-id cmd (#1333)
+ [Michael Rommel]
+ - sources/azure: remove bind/unbind logic for hot attached nic (#1332)
+ [Chris Patterson]
+ - tox: add types-* packages to check_format env (#1362)
+ - tests: python 3.10 is showing up in cloudimages (#1364)
+ - testing: add additional mocks to test_net tests (#1356) [yangzz-97]
+ - schema: add JSON schema for mcollective, migrator and mounts modules
+ (#1358)
+ - Honor system locale for RHEL (#1355) [Wei Shi]
+ - doc: Fix typo in cloud-config-run-cmds.txt example (#1359) [Ali Shirvani]
+ - ds-identify: also discover LXD by presence from DMI board_name = LXD
+ (#1311)
+ - black: bump pinned version to 22.3.0 to avoid click dependency issues
+ (#1357)
+ - Various doc fixes (#1330)
+ - testing: Add missing is_FreeBSD mock to networking test (#1353)
+ - Add --no-update to add-apt-repostory call (SC-880) (#1337)
+ - schema: add json defs for modules K-L (#1321)
+ (LP: #1858899, #1858900, #1858901, #1858902)
+ - docs: Re-order readthedocs install (#1354)
+ - Stop cc_ssh_authkey_fingerprints from ALWAYS creating home (#1343)
+ [Jeffrey 'jf' Lim]
+ - docs: add jinja2 pin (#1352)
+ - Vultr: Use find_candidate_nics, use ipv6 dns (#1344) [eb3095]
+ - sources/azure: move get_ip_from_lease_value out of shim (#1324)
+ [Chris Patterson]
+ - Fix cloud-init status --wait when no datasource found (#1349)
+ (LP: #1966085)
+ - schema: add JSON defs for modules resize-salt (SC-654) (#1341)
+ - Add myself as a future contributor (#1345) [Neal Gompa (ニール・ゴンパ)]
+ - Update .github-cla-signers (#1342) [Jeffrey 'jf' Lim]
+ - add Requires=cloud-init-hotplugd.socket in cloud-init-hotplugd.service
+ file (#1335) [yangzz-97]
+ - Fix sysconfig render when set-name is missing (#1327)
+ [Andrew Kutz] (LP: #1855945)
+ - Refactoring helper funcs out of NetworkState (#1336) [Andrew Kutz]
+ - url_helper: add tuple support for readurl timeout (#1328)
+ [Chris Patterson]
+ - Make fs labels match for ds-identify and docs (#1329)
+ - Work around bug in LXD VM detection (#1325)
+ - Remove redundant generator logs (#1318)
+ - tox: set verbose flags for integration tests (#1323) [Chris Patterson]
+ - net: introduce find_candidate_nics() (#1313) [Chris Patterson]
+ - Revert "Ensure system_cfg read before ds net config on Oracle (#1174)"
+ (#1326)
+ - Add vendor_data2 support for ConfigDrive source (#1307) [cvstealth]
+ - Make VMWare data source test host independent and expand testing (#1308)
+ [Robert Schweikert]
+ - Add json schemas for modules starting with P
+ - sources/azure: remove lease file parsing (#1302) [Chris Patterson]
+ - remove flaky test from ci (#1322)
+ - ci: Switch to python 3.10 in Travis CI (#1320)
+ - Better interface handling for Vultr, expect unexpected DHCP servers
+ (#1297) [eb3095]
+ - Remove unused init local artifact (#1315)
+ - Doc cleanups (#1317)
+ - docs improvements (#1312)
+ - add support for jinja do statements, add unit test (#1314)
+ [Paul Bruno] (LP: #1962759)
+ - sources/azure: prevent tight loops for DHCP retries (#1285)
+ [Chris Patterson]
+ - net/dhcp: surface type of DHCP lease failure to caller (#1276)
+ [Chris Patterson]
+ - Stop hardcoding systemctl location (#1278) [Robert Schweikert]
+ - Remove python2 syntax from docs (#1310)
+ - [tools/migrate-lp-user-to-github] Rename master branch to main (#1301)
+ [Adam Collard]
+ - redhat: Depend on "hostname" package (#1288) [Lubomir Rintel]
+ - Add native NetworkManager support (#1224) [Lubomir Rintel]
+ - Fix link in CLA check to point to contribution guide. (#1299)
+ [Adam Collard]
+
+ -- Brett Holman <brett.holman@canonical.com> Wed, 18 May 2022 11:23:42 -0600
+
cloud-init (22.1-14-g2e17a0d6-0ubuntu1~22.04.5) jammy; urgency=medium
* d/p/cpick-be9389c6-Work-around-bug-in-LXD-VM-detection-1325:
diff --git a/debian/cloud-init.postinst b/debian/cloud-init.postinst
index 683ba86d..f7926c91 100644
--- a/debian/cloud-init.postinst
+++ b/debian/cloud-init.postinst
@@ -208,6 +208,22 @@ cleanup_lp1552999() {
"$edir/cloud-init-local.service" "$edir/cloud-init.service"
}
+remove_deprecated_cloud_config_modules() {
+ local oldver="$1" last_bad_ver="22.1-14-g2e17a0d6-0ubuntu1~22.04.5"
+ if dpkg --compare-versions "$oldver" le "$last_bad_ver"; then
+ if grep -q emit_upstart /etc/cloud/cloud.cfg; then
+ # Remove emit_upstart if present in locally modified cloud.cfg:
+ # 1. "- emit_upstart" on a single line as defined in upstream
+ # 2. upstart_dir config which is no longer used
+ sed -i -e '/- emit_upstart/d' -e '/upstart_dir/d' /etc/cloud/cloud.cfg || true
+ if grep -q emit_upstart /etc/cloud/cloud.cfg; then
+ echo "DEPRECATION: unable to remove emit_upstart from /etc/cloud/cloud.cfg."
+ echo "This module is deprecated, you may remove it to eliminate warnings in cloud-init logs."
+ fi
+ fi
+ fi
+}
+
disable_network_config_on_upgrade() {
local oldver="$1" last_without_net="0.7.7~bzr1182-0ubuntu1"
if [ ! -f /var/lib/cloud/instance/obj.pkl ]; then
@@ -370,6 +386,8 @@ EOF
# make upgrades disable network changes by cloud-init
disable_network_config_on_upgrade "$2"
+ remove_deprecated_cloud_config_modules "$2"
+
fix_azure_upgrade_1611074 "$2"
cleanup_ureadahead "$2"
diff --git a/debian/control b/debian/control
index 8fae4be8..0ad1aee4 100644
--- a/debian/control
+++ b/debian/control
@@ -18,10 +18,12 @@ Build-Depends: debhelper-compat (= 13),
python3-netifaces,
python3-oauthlib,
python3-pytest,
+ python3-pytest-mock,
python3-requests,
python3-serial,
python3-setuptools,
- python3-yaml
+ python3-yaml,
+ python3-responses
XS-Python-Version: all
Vcs-Browser: https://github.com/canonical/cloud-init/tree/ubuntu/devel
Vcs-Git: https://github.com/canonical/cloud-init -b ubuntu/devel
@@ -42,6 +44,7 @@ Depends: cloud-guest-utils | cloud-utils,
${misc:Depends},
${python3:Depends}
Recommends: eatmydata, gdisk, gnupg, software-properties-common
+Suggests: openssh-server, ssh-import-id
XB-Python-Version: ${python:Versions}
Description: initialization and customization tool for cloud instances
Cloud-init is the industry standard multi-distribution method for
diff --git a/debian/patches/cpick-30ccd51a-ds-identify-also-discover-LXD-by-presence-from-DMI b/debian/patches/cpick-30ccd51a-ds-identify-also-discover-LXD-by-presence-from-DMI
deleted file mode 100644
index 7054cb8c..00000000
--- a/debian/patches/cpick-30ccd51a-ds-identify-also-discover-LXD-by-presence-from-DMI
+++ /dev/null
@@ -1,307 +0,0 @@
-From 30ccd51acdff1b06452bb690c7a283abd1fb2e99 Mon Sep 17 00:00:00 2001
-From: Chad Smith <chad.smith@canonical.com>
-Date: Tue, 29 Mar 2022 14:25:33 -0600
-Subject: [PATCH] ds-identify: also discover LXD by presence from DMI
- board_name = LXD (#1311)
-
-VMs will not start lxd-agent.service in systemd generator timeframe
-which means /dev/lxd/sock will not exist yet on LXD VM.
-
-For VM support, ds-identify will return DS_FOUND when
-/sys/class/dmi/id/board_name == "LXD" which exists at
-early boot regardless of LXD socket status.
-
-Later, in cloud-init init-local boot stage, cloud-init will only
-discover the LXDDatasource if the /dev/lxd/sock is active.
-This allows consumers to disable the LXD datasource
-behavior by running:
- lxc config set MACHINE_NAME security.devlxd=false
----
- doc/rtd/topics/datasources/lxd.rst | 24 ++++++++---
- .../datasources/test_lxd_discovery.py | 36 ++++++++++++++--
- tests/unittests/test_ds_identify.py | 41 +++++++++++++------
- tools/ds-identify | 25 ++++++++++-
- 4 files changed, 102 insertions(+), 24 deletions(-)
-
---- a/doc/rtd/topics/datasources/lxd.rst
-+++ b/doc/rtd/topics/datasources/lxd.rst
-@@ -20,18 +20,22 @@ The LXD socket device ``/dev/lxd/sock``
- when the instance configuration has ``security.devlxd=true`` (default).
- Disabling ``security.devlxd`` configuration setting at initial launch will
- ensure that cloud-init uses the :ref:`datasource_nocloud` datasource.
--Disabling ``security.devlxd`` ove the life of the container will result in
-+Disabling ``security.devlxd`` over the life of the container will result in
- warnings from cloud-init and cloud-init will keep the originally detected LXD
- datasource.
-
--The LXD datasource provides cloud-init the opportunity to react to meta-data,
-+The LXD datasource is detected as viable by ``ds-identify`` during systemd
-+generator time when either ``/dev/lxd/sock`` exists or
-+``/sys/class/dmi/id/board_name`` matches "LXD".
-+
-+The LXD datasource provides cloud-init the ability to react to meta-data,
- vendor-data, user-data and network-config changes and render the updated
- configuration across a system reboot.
-
--One can manipulate what meta-data, vendor-data or user-data is provided to
--the launched container using the LXD profiles or
--``lxc launch ... -c <key>="<value>"`` at initial container launch using one of
--the following keys:
-+To modify what meta-data, vendor-data or user-data are provided to the
-+launched container, use either LXD profiles or
-+``lxc launch ... -c <key>="<value>"`` at initial container launch setting one
-+of the following keys:
-
- - user.meta-data: YAML metadata which will be appended to base meta-data
- - user.vendor-data: YAML which overrides any meta-data values
-@@ -44,6 +48,14 @@ the following keys:
- used by both `#template: jinja` #cloud-config templates and
- the `cloud-init query` command.
-
-+Note: LXD version 4.22 introduced a new scope of config keys prefaced by
-+``cloud-init.`` which are preferred above the related ``user.*`` keys:
-+
-+ - cloud-init.meta-data
-+ - cloud-init.vendor-data
-+ - cloud-init.network-config
-+ - cloud-init.user-data
-+
-
- By default, network configuration from this datasource will be:
-
---- a/tests/integration_tests/datasources/test_lxd_discovery.py
-+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
-@@ -9,9 +9,34 @@ from tests.integration_tests.util import
-
-
- def _customize_envionment(client: IntegrationInstance):
-+ # Assert our platform can detect LXD during sytemd generator timeframe.
-+ ds_id_log = client.execute("cat /run/cloud-init/ds-identify.log").stdout
-+ assert "check for 'LXD' returned found" in ds_id_log
-+
-+ # At some point Jammy will fail this test. We want to be informed
-+ # when Jammy images no longer ship NoCloud template files (LP: #1958460).
-+ assert "check for 'NoCloud' returned found" in ds_id_log
-+ if client.settings.PLATFORM == "lxd_vm":
-+ # ds-identify runs at systemd generator time before /dev/lxd/sock.
-+ # Assert we can expected artifact which indicates LXD is viable.
-+ result = client.execute("cat /sys/class/dmi/id/board_name")
-+ if not result.ok:
-+ raise AssertionError(
-+ "Missing expected /sys/class/dmi/id/board_name"
-+ )
-+ if "LXD" != result.stdout:
-+ raise AssertionError(f"DMI board_name is not LXD: {result.stdout}")
-+
-+ # Having multiple datasources prevents ds-identify from short-circuiting
-+ # detection logic with a log like:
-+ # single entry in datasource_list (LXD) use that.
-+ # Also, NoCloud is detected during init-local timeframe.
-+
-+ # If there is a race on VMs where /dev/lxd/sock is not setup in init-local
-+ # cloud-init will fallback to NoCloud and fail this test.
- client.write_to_file(
-- "/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg",
-- "datasource_list: [LXD]\n",
-+ "/etc/cloud/cloud.cfg.d/99-detect-lxd-first.cfg",
-+ "datasource_list: [LXD, NoCloud]\n",
- )
- client.execute("cloud-init clean --logs")
- client.restart()
-@@ -24,9 +49,9 @@ def _customize_envionment(client: Integr
- @pytest.mark.ubuntu # Because netplan
- def test_lxd_datasource_discovery(client: IntegrationInstance):
- """Test that DataSourceLXD is detected instead of NoCloud."""
-+
- _customize_envionment(client)
-- nic_dev = "enp5s0" if client.settings.PLATFORM == "lxd_vm" else "eth0"
-- result = client.execute("cloud-init status --long")
-+ result = client.execute("cloud-init status --wait --long")
- if not result.ok:
- raise AssertionError("cloud-init failed:\n%s", result.stderr)
- if "DataSourceLXD" not in result.stdout:
-@@ -35,6 +60,9 @@ def test_lxd_datasource_discovery(client
- )
- netplan_yaml = client.execute("cat /etc/netplan/50-cloud-init.yaml")
- netplan_cfg = yaml.safe_load(netplan_yaml)
-+
-+ platform = client.settings.PLATFORM
-+ nic_dev = "eth0" if platform == "lxd_container" else "enp5s0"
- assert {
- "network": {"ethernets": {nic_dev: {"dhcp4": True}}, "version": 2}
- } == netplan_cfg
---- a/tests/unittests/test_ds_identify.py
-+++ b/tests/unittests/test_ds_identify.py
-@@ -77,6 +77,7 @@ RC_FOUND = 0
- RC_NOT_FOUND = 1
- DS_NONE = "None"
-
-+P_BOARD_NAME = "sys/class/dmi/id/board_name"
- P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag"
- P_PRODUCT_NAME = "sys/class/dmi/id/product_name"
- P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial"
-@@ -101,8 +102,6 @@ MOCK_VIRT_IS_XEN = {"name": "detect_virt
- MOCK_UNAME_IS_PPC64 = {"name": "uname", "out": UNAME_PPC64EL, "ret": 0}
- MOCK_UNAME_IS_FREEBSD = {"name": "uname", "out": UNAME_FREEBSD, "ret": 0}
-
--DEFAULT_MOCKS = [MOCK_NOT_LXD_DATASOURCE]
--
- shell_true = 0
- shell_false = 1
-
-@@ -119,6 +118,7 @@ class DsIdentifyBase(CiTestCase):
- self,
- rootd=None,
- mocks=None,
-+ no_mocks=None,
- func="main",
- args=None,
- files=None,
-@@ -165,7 +165,8 @@ class DsIdentifyBase(CiTestCase):
- return SHELL_MOCK_TMPL % ddata
-
- mocklines = []
-- defaults = [
-+ default_mocks = [
-+ MOCK_NOT_LXD_DATASOURCE,
- {"name": "detect_virt", "RET": "none", "ret": 1},
- {"name": "uname", "out": UNAME_MYSYS},
- {"name": "blkid", "out": BLKID_EFI_ROOT},
-@@ -189,7 +190,9 @@ class DsIdentifyBase(CiTestCase):
- written = [d["name"] for d in mocks]
- for data in mocks:
- mocklines.append(write_mock(data))
-- for d in defaults:
-+ for d in default_mocks:
-+ if no_mocks and d["name"] in no_mocks:
-+ continue
- if d["name"] not in written:
- mocklines.append(write_mock(d))
-
-@@ -221,6 +224,7 @@ class DsIdentifyBase(CiTestCase):
- # return output of self.call with a dict input like VALID_CFG[item]
- xwargs = {"rootd": rootd}
- passthrough = (
-+ "no_mocks", # named mocks to ignore
- "mocks",
- "func",
- "args",
-@@ -233,14 +237,6 @@ class DsIdentifyBase(CiTestCase):
- xwargs[k] = data[k]
- if k in kwargs:
- xwargs[k] = kwargs[k]
-- if "mocks" not in xwargs:
-- xwargs["mocks"] = DEFAULT_MOCKS
-- else:
-- mocked_funcs = [m["name"] for m in xwargs["mocks"]]
-- for default_mock in DEFAULT_MOCKS:
-- if default_mock["name"] not in mocked_funcs:
-- xwargs["mocks"].append(default_mock)
--
- return self.call(**xwargs)
-
- def _test_ds_found(self, name):
-@@ -338,6 +334,14 @@ class TestDsIdentify(DsIdentifyBase):
- """Older gce compute instances must be identified by serial."""
- self._test_ds_found("GCE-serial")
-
-+ def test_lxd_kvm(self):
-+        """LXD KVM has a race on an absent /dev/lxd/sock. Use DMI board_name."""
-+ self._test_ds_found("LXD-kvm")
-+
-+ def test_lxd_containers(self):
-+        """LXD containers will have /dev/lxd/sock at generator time."""
-+ self._test_ds_found("LXD")
-+
- def test_config_drive(self):
- """ConfigDrive datasource has a disk with LABEL=config-2."""
- self._test_ds_found("ConfigDrive")
-@@ -1020,6 +1024,19 @@ VALID_CFG = {
- "files": {P_PRODUCT_SERIAL: "GoogleCloud-8f2e88f\n"},
- "mocks": [MOCK_VIRT_IS_KVM],
- },
-+ "LXD-kvm": {
-+ "ds": "LXD",
-+ "files": {P_BOARD_NAME: "LXD\n"},
-+ # /dev/lxd/sock does not exist and KVM virt-type
-+ "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM],
-+ "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD
-+ },
-+ "LXD": {
-+ "ds": "LXD",
-+ # /dev/lxd/sock exists
-+ "mocks": [{"name": "is_socket_file", "ret": 0}],
-+ "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD
-+ },
- "NoCloud": {
- "ds": "NoCloud",
- "mocks": [
---- a/tools/ds-identify
-+++ b/tools/ds-identify
-@@ -96,6 +96,7 @@ DI_BLKID_EXPORT_OUT=""
- DI_GEOM_LABEL_STATUS_OUT=""
- DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}"
- DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}"
-+DI_DMI_BOARD_NAME=""
- DI_DMI_CHASSIS_ASSET_TAG=""
- DI_DMI_PRODUCT_NAME=""
- DI_DMI_SYS_VENDOR=""
-@@ -460,6 +461,10 @@ is_container() {
- esac
- }
-
-+is_socket_file() {
-+    [ -S "$1" ] && return 0 || return 1
-+}
-+
- read_kernel_cmdline() {
- cached "${DI_KERNEL_CMDLINE}" && return
- local cmdline="" fpath="${PATH_PROC_CMDLINE}"
-@@ -477,6 +482,12 @@ read_kernel_cmdline() {
- DI_KERNEL_CMDLINE="$cmdline"
- }
-
-+read_dmi_board_name() {
-+ cached "${DI_DMI_BOARD_NAME}" && return
-+ get_dmi_field board_name
-+ DI_DMI_BOARD_NAME="$_RET"
-+}
-+
- read_dmi_chassis_asset_tag() {
- cached "${DI_DMI_CHASSIS_ASSET_TAG}" && return
- get_dmi_field chassis_asset_tag
-@@ -806,7 +817,16 @@ dscheck_MAAS() {
- # LXD datasource requires active /dev/lxd/sock
- # https://linuxcontainers.org/lxd/docs/master/dev-lxd
- dscheck_LXD() {
-- [ -S /dev/lxd/sock ] && return ${DS_FOUND} || return ${DS_NOT_FOUND}
-+ if is_socket_file /dev/lxd/sock; then
-+ return ${DS_FOUND}
-+ fi
-+ # On LXD KVM instances, /dev/lxd/sock is not yet setup by
-+ # lxd-agent-loader's systemd lxd-agent.service.
-+ # Rely on DMI product information that is present on all LXD images.
-+ if [ "${DI_VIRT}" = "kvm" ]; then
-+ [ "${DI_DMI_BOARD_NAME}" = "LXD" ] && return ${DS_FOUND}
-+ fi
-+ return ${DS_NOT_FOUND}
- }
-
- dscheck_NoCloud() {
-@@ -1466,6 +1486,7 @@ collect_info() {
- read_config
- read_datasource_list
- read_dmi_sys_vendor
-+ read_dmi_board_name
- read_dmi_chassis_asset_tag
- read_dmi_product_name
- read_dmi_product_serial
-@@ -1482,7 +1503,7 @@ _print_info() {
- local n="" v="" vars=""
- vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL"
- vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG"
-- vars="$vars FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT"
-+ vars="$vars DMI_BOARD_NAME FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT"
- vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION"
- vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM"
- vars="$vars DSNAME DSLIST"
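
Taken together, the hunks above give ds-identify two independent signals for
LXD. A minimal standalone sketch of the resulting check order (illustrative
only, assuming DI_VIRT comes from systemd-detect-virt as elsewhere in the
script; the real logic lives in dscheck_LXD above, and a follow-up patch
below extends the virt-type match to "qemu"):

    #!/bin/sh
    # 1) The socket check covers containers, and VMs once lxd-agent is up.
    if [ -S /dev/lxd/sock ]; then
        echo "LXD found via /dev/lxd/sock"
    # 2) The DMI fallback covers the VM race before the socket exists.
    elif [ "$(systemd-detect-virt)" = "kvm" ] && \
         [ "$(cat /sys/class/dmi/id/board_name 2>/dev/null)" = "LXD" ]; then
        echo "LXD found via DMI board_name"
    else
        echo "LXD not found"
    fi
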
diff --git a/debian/patches/cpick-5e347d25-Revert-Ensure-system_cfg-read-before-ds-net-config-on b/debian/patches/cpick-5e347d25-Revert-Ensure-system_cfg-read-before-ds-net-config-on
deleted file mode 100644
index 7d668e88..00000000
--- a/debian/patches/cpick-5e347d25-Revert-Ensure-system_cfg-read-before-ds-net-config-on
+++ /dev/null
@@ -1,82 +0,0 @@
-From 5e347d2506aea32b528c480e8dcd981183548ee4 Mon Sep 17 00:00:00 2001
-From: James Falcon <james.falcon@canonical.com>
-Date: Fri, 11 Mar 2022 11:02:16 -0600
-Subject: [PATCH] Revert "Ensure system_cfg read before ds net config on Oracle
- (#1174)" (#1326)
-
-This reverts commit b306633fd17e5ba0173ad3c41add59cb11884757.
-
-While this ultimately seems like a better solution, currently the
-file /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg exists on
-all Oracle-launched instances, which prevents networking from
-being properly initialized.
----
- cloudinit/sources/DataSourceOracle.py | 2 +-
- .../network/test_net_config_load.py | 27 -------------------
- tests/unittests/sources/test_oracle.py | 8 +++---
- 3 files changed, 4 insertions(+), 33 deletions(-)
- delete mode 100644 tests/integration_tests/network/test_net_config_load.py
-
---- a/cloudinit/sources/DataSourceOracle.py
-+++ b/cloudinit/sources/DataSourceOracle.py
-@@ -104,9 +104,9 @@ class DataSourceOracle(sources.DataSourc
- vendordata_pure = None
- network_config_sources = (
- sources.NetworkConfigSource.cmdline,
-- sources.NetworkConfigSource.system_cfg,
- sources.NetworkConfigSource.ds,
- sources.NetworkConfigSource.initramfs,
-+ sources.NetworkConfigSource.system_cfg,
- )
-
- _network_config = sources.UNSET
---- a/tests/integration_tests/network/test_net_config_load.py
-+++ /dev/null
-@@ -1,27 +0,0 @@
--"""Test loading the network config"""
--import pytest
--
--from tests.integration_tests.instances import IntegrationInstance
--
--
--def _customize_envionment(client: IntegrationInstance):
-- # Insert our "disable_network_config" file here
-- client.write_to_file(
-- "/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg",
-- "network: {config: disabled}\n",
-- )
-- client.execute("cloud-init clean --logs")
-- client.restart()
--
--
--def test_network_disabled_via_etc_cloud(client: IntegrationInstance):
-- """Test that network can be disabled via config file in /etc/cloud"""
-- if client.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
-- pytest.skip(
-- "IN_PLACE not supported as we mount /etc/cloud contents into the "
-- "container"
-- )
-- _customize_envionment(client)
--
-- log = client.read_from_file("/var/log/cloud-init.log")
-- assert "network config is disabled by system_cfg" in log
---- a/tests/unittests/sources/test_oracle.py
-+++ b/tests/unittests/sources/test_oracle.py
-@@ -920,14 +920,12 @@ class TestNetworkConfig:
- assert network_config == m_read_initramfs_config.return_value
- assert "Failed to parse secondary network configuration" in caplog.text
-
-- def test_ds_network_cfg_order(self, _m):
-- """Ensure that DS net config is preferred over initramfs config
-- but less than system config."""
-+ def test_ds_network_cfg_preferred_over_initramfs(self, _m):
-+ """Ensure that DS net config is preferred over initramfs config"""
- config_sources = oracle.DataSourceOracle.network_config_sources
-- system_idx = config_sources.index(NetworkConfigSource.system_cfg)
- ds_idx = config_sources.index(NetworkConfigSource.ds)
- initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
-- assert system_idx < ds_idx < initramfs_idx
-+ assert ds_idx < initramfs_idx
-
-
- # vi: ts=4 expandtab
diff --git a/debian/patches/cpick-be9389c6-Work-around-bug-in-LXD-VM-detection-1325 b/debian/patches/cpick-be9389c6-Work-around-bug-in-LXD-VM-detection-1325
deleted file mode 100644
index 8c64998b..00000000
--- a/debian/patches/cpick-be9389c6-Work-around-bug-in-LXD-VM-detection-1325
+++ /dev/null
@@ -1,31 +0,0 @@
-From be9389c62c6b7b2ecb5ea7cf7ffefc80a2d31210 Mon Sep 17 00:00:00 2001
-From: Brett Holman <bholman.devel@gmail.com>
-Date: Mon, 14 Mar 2022 16:00:47 +0100
-Subject: [PATCH] Work around bug in LXD VM detection (#1325)
-
-On kernels >=5.10, LXD starts qemu with kvm and hv_passthrough.
-This causes `systemd-detect-virt` to identify the host as "qemu", rather than "kvm".
-
-Cloud-init treats emulated (TCG) virtualization the same way as virtualized (KVM).
-If systemd (see issue #22709) decides to report this as something other than
-kvm/qemu, we'll need to extend our list of accepted types to include that as well.
-
-https://github.com/systemd/systemd/issues/22709
----
- cloudinit/sources/DataSourceLXD.py | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/cloudinit/sources/DataSourceLXD.py
-+++ b/cloudinit/sources/DataSourceLXD.py
-@@ -71,7 +71,10 @@ def generate_fallback_network_config() -
- err,
- )
- return network_v1
-- if virt_type.strip() == "kvm": # instance.type VIRTUAL-MACHINE
-+ if virt_type.strip() in (
-+ "kvm",
-+ "qemu",
-+ ): # instance.type VIRTUAL-MACHINE
- arch = util.system_info()["uname"][4]
- if arch == "ppc64le":
- network_v1["config"][0]["name"] = "enp0s5"
diff --git a/debian/patches/cpick-e3307e4d-ds-identify-detect-LXD-for-VMs-launched-from-host-with b/debian/patches/cpick-e3307e4d-ds-identify-detect-LXD-for-VMs-launched-from-host-with
deleted file mode 100644
index 079344cf..00000000
--- a/debian/patches/cpick-e3307e4d-ds-identify-detect-LXD-for-VMs-launched-from-host-with
+++ /dev/null
@@ -1,86 +0,0 @@
-From e3307e4d8cd7ccf352c7662783b472545e8ecc56 Mon Sep 17 00:00:00 2001
-From: Chad Smith <chad.smith@canonical.com>
-Date: Wed, 6 Apr 2022 15:15:21 -0600
-Subject: [PATCH] ds-identify: detect LXD for VMs launched from host with >
- 5.10 kernel (#1370)
-
-Launching KVM instances from a host system with > 5.10 kernel results
-in LXD passing `hv_passthrough` to the kvm instance being launched.
-Systemd < 251 will incorrectly detect the CPU in this case as
-"qemu" instead of "kvm".
-
-ds-identify needs to properly interpret systems with
-systemd-detect-virt="qemu" and /sys/class/dmi/id/board_name="LXD"
-
-This functionality can be dropped once systemd 251 support is
-available on all supported distributions.
-
-LP: #1968085
----
- tests/unittests/test_ds_identify.py | 25 +++++++++++++++++++++++++
- tools/ds-identify | 6 +++++-
- 2 files changed, 30 insertions(+), 1 deletion(-)
-
---- a/tests/unittests/test_ds_identify.py
-+++ b/tests/unittests/test_ds_identify.py
-@@ -95,6 +95,10 @@ MOCK_VIRT_IS_CONTAINER_OTHER = {
- }
- MOCK_NOT_LXD_DATASOURCE = {"name": "dscheck_LXD", "ret": 1}
- MOCK_VIRT_IS_KVM = {"name": "detect_virt", "RET": "kvm", "ret": 0}
-+# qemu support for LXD applies only to hosts with kernel > 5.10, as lxd
-+# passes `hv_passthrough`, which causes systemd < v251 to misreport the CPU
-+# as "qemu" instead of "kvm"
-+MOCK_VIRT_IS_KVM_QEMU = {"name": "detect_virt", "RET": "qemu", "ret": 0}
- MOCK_VIRT_IS_VMWARE = {"name": "detect_virt", "RET": "vmware", "ret": 0}
-# currently, SmartOS' hypervisor "bhyve" is unknown to systemd-detect-virt.
- MOCK_VIRT_IS_VM_OTHER = {"name": "detect_virt", "RET": "vm-other", "ret": 0}
-@@ -338,6 +342,20 @@ class TestDsIdentify(DsIdentifyBase):
-        """LXD KVM has a race on an absent /dev/lxd/sock. Use DMI board_name."""
- self._test_ds_found("LXD-kvm")
-
-+ def test_lxd_kvm_jammy(self):
-+ """LXD KVM on host systems with a kernel > 5.10 need to match "qemu".
-+ LXD provides `hv_passthrough` when launching kvm instances when host
-+ kernel is > 5.10. This results in systemd being unable to detect the
-+ virtualized CPUID="Linux KVM Hv" as type "kvm" and results in
-+ systemd-detect-virt returning "qemu" in this case.
-+
-+ Assert ds-identify can match systemd-detect-virt="qemu" and
-+ /sys/class/dmi/id/board_name = LXD.
-+ Once systemd 251 is available on a target distro, the virtualized
-+ CPUID will be represented properly as "kvm"
-+ """
-+ self._test_ds_found("LXD-kvm-qemu-kernel-gt-5.10")
-+
- def test_lxd_containers(self):
-        """LXD containers will have /dev/lxd/sock at generator time."""
- self._test_ds_found("LXD")
-@@ -1031,6 +1049,13 @@ VALID_CFG = {
- "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM],
- "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD
- },
-+ "LXD-kvm-qemu-kernel-gt-5.10": { # LXD host > 5.10 kvm launch virt==qemu
-+ "ds": "LXD",
-+ "files": {P_BOARD_NAME: "LXD\n"},
-+ # /dev/lxd/sock does not exist and KVM virt-type
-+ "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM_QEMU],
-+ "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD
-+ },
- "LXD": {
- "ds": "LXD",
- # /dev/lxd/sock exists
---- a/tools/ds-identify
-+++ b/tools/ds-identify
-@@ -823,7 +823,11 @@ dscheck_LXD() {
- # On LXD KVM instances, /dev/lxd/sock is not yet setup by
- # lxd-agent-loader's systemd lxd-agent.service.
- # Rely on DMI product information that is present on all LXD images.
-- if [ "${DI_VIRT}" = "kvm" ]; then
-+    # Note "qemu" is returned on kvm instances launched from hosts with
-+    # kernels >=5.10, due to the `hv_passthrough` option.
-+    # systemd v251 should properly return "kvm" in this scenario
-+ # https://github.com/systemd/systemd/issues/22709
-+ if [ "${DI_VIRT}" = "kvm" -o "${DI_VIRT}" = "qemu" ]; then
- [ "${DI_DMI_BOARD_NAME}" = "LXD" ] && return ${DS_FOUND}
- fi
- return ${DS_NOT_FOUND}
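
On an affected guest, the two inputs the amended check consumes can be
inspected directly (a quick sanity check, not part of the patch):

    systemd-detect-virt               # "qemu" on hosts with kernel >= 5.10
                                      # and systemd < 251; "kvm" otherwise
    cat /sys/class/dmi/id/board_name  # "LXD" on LXD-launched VMs
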
diff --git a/debian/patches/cpick-eee60329-Fix-cloud-init-status-wait-when-no-datasource-found b/debian/patches/cpick-eee60329-Fix-cloud-init-status-wait-when-no-datasource-found
deleted file mode 100644
index 3227517e..00000000
--- a/debian/patches/cpick-eee60329-Fix-cloud-init-status-wait-when-no-datasource-found
+++ /dev/null
@@ -1,172 +0,0 @@
-From eee603294f120cf98696351433e7e6dbc9a3dbc2 Mon Sep 17 00:00:00 2001
-From: James Falcon <james.falcon@canonical.com>
-Date: Wed, 23 Mar 2022 18:03:00 -0500
-Subject: [PATCH] Fix cloud-init status --wait when no datasource found (#1349)
-
-* Fix cloud-init status --wait when no datasource found
-
-In 0de7acb1, we modified status checks to wait until we get an "enabled"
-or "disabled" file from ds-identify. ds-identify never outputs a
-"disabled" file, so "status --wait" will wait indefinitely if no
-datasource is found.
-
-LP: #1966085
----
- systemd/cloud-init-generator.tmpl | 7 +++
- tests/integration_tests/clouds.py | 16 ++++--
- tests/integration_tests/cmd/test_status.py | 65 ++++++++++++++++++++++
- 3 files changed, 82 insertions(+), 6 deletions(-)
- create mode 100644 tests/integration_tests/cmd/test_status.py
-
-Origin: backport, https://github.com/canonical/cloud-init/commit/eee60329
-Bug-Ubuntu: https://bugs.launchpad.net/bugs/1966085
-Last-Update: 2022-03-24
-Index: cloud-init/systemd/cloud-init-generator.tmpl
-===================================================================
---- cloud-init.orig/systemd/cloud-init-generator.tmpl
-+++ cloud-init/systemd/cloud-init-generator.tmpl
-@@ -10,6 +10,7 @@ DISABLE="disabled"
- FOUND="found"
- NOTFOUND="notfound"
- RUN_ENABLED_FILE="$LOG_D/$ENABLE"
-+RUN_DISABLED_FILE="$LOG_D/$DISABLE"
- {% if variant in ["suse"] %}
- CLOUD_SYSTEM_TARGET="/usr/lib/systemd/system/cloud-init.target"
- {% else %}
-@@ -154,6 +155,10 @@ main() {
- "ln $CLOUD_SYSTEM_TARGET $link_path"
- fi
- fi
-+    if [ -e "$RUN_DISABLED_FILE" ]; then
-+ debug 1 "removing $RUN_DISABLED_FILE and creating $RUN_ENABLED_FILE"
-+ rm -f $RUN_DISABLED_FILE
-+ fi
- : > "$RUN_ENABLED_FILE"
- elif [ "$result" = "$DISABLE" ]; then
- if [ -f "$link_path" ]; then
-@@ -167,8 +172,10 @@ main() {
- debug 1 "already disabled: no change needed [no $link_path]"
- fi
- if [ -e "$RUN_ENABLED_FILE" ]; then
-+ debug 1 "removing $RUN_ENABLED_FILE and creating $RUN_DISABLED_FILE"
- rm -f "$RUN_ENABLED_FILE"
- fi
-+ : > "$RUN_DISABLED_FILE"
- else
- debug 0 "unexpected result '$result' 'ds=$ds'"
- ret=3
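
With this change the generator always leaves exactly one marker file behind,
which is what the status checks described in the commit message look for.
Assuming $LOG_D resolves to /run/cloud-init, the observable states are:

    ls /run/cloud-init/
    # enabled   -> a datasource was found; cloud-init will run
    # disabled  -> no datasource; "cloud-init status --wait" can now return
    #              instead of blocking forever (LP: #1966085)
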
-Index: cloud-init/tests/integration_tests/clouds.py
-===================================================================
---- cloud-init.orig/tests/integration_tests/clouds.py
-+++ cloud-init/tests/integration_tests/clouds.py
-@@ -5,6 +5,7 @@ import os.path
- import random
- import string
- from abc import ABC, abstractmethod
-+from copy import deepcopy
- from typing import Optional, Type
- from uuid import UUID
-
-@@ -291,12 +292,15 @@ class _LxdIntegrationCloud(IntegrationCl
- subp(command.split())
-
- def _perform_launch(self, launch_kwargs, **kwargs):
-- launch_kwargs["inst_type"] = launch_kwargs.pop("instance_type", None)
-- wait = launch_kwargs.pop("wait", True)
-- release = launch_kwargs.pop("image_id")
-+ instance_kwargs = deepcopy(launch_kwargs)
-+ instance_kwargs["inst_type"] = instance_kwargs.pop(
-+ "instance_type", None
-+ )
-+ wait = instance_kwargs.pop("wait", True)
-+ release = instance_kwargs.pop("image_id")
-
- try:
-- profile_list = launch_kwargs["profile_list"]
-+ profile_list = instance_kwargs["profile_list"]
- except KeyError:
- profile_list = self._get_or_set_profile_list(release)
-
-@@ -305,10 +309,10 @@ class _LxdIntegrationCloud(IntegrationCl
- random.choices(string.ascii_lowercase + string.digits, k=8)
- )
- pycloudlib_instance = self.cloud_instance.init(
-- launch_kwargs.pop("name", default_name),
-+ instance_kwargs.pop("name", default_name),
- release,
- profile_list=profile_list,
-- **launch_kwargs,
-+ **instance_kwargs,
- )
- if self.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
- self._mount_source(pycloudlib_instance)
-Index: cloud-init/tests/integration_tests/cmd/test_status.py
-===================================================================
---- /dev/null
-+++ cloud-init/tests/integration_tests/cmd/test_status.py
-@@ -0,0 +1,65 @@
-+"""Tests for `cloud-init status`"""
-+from time import sleep
-+
-+import pytest
-+
-+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
-+from tests.integration_tests.instances import IntegrationInstance
-+
-+
-+# We're implementing our own here in case cloud-init status --wait
-+# isn't working correctly (LP: #1966085)
-+def _wait_for_cloud_init(client: IntegrationInstance):
-+ last_exception = None
-+ for _ in range(30):
-+ try:
-+ result = client.execute("cloud-init status --long")
-+ if result and result.ok:
-+ return result
-+ except Exception as e:
-+ last_exception = e
-+ sleep(1)
-+ raise Exception(
-+ "cloud-init status did not return successfully."
-+ ) from last_exception
-+
-+
-+def _remove_nocloud_dir_and_reboot(client: IntegrationInstance):
-+ # On Impish and below, NoCloud will be detected on an LXD container.
-+ # If we remove this directory, it will no longer be detected.
-+ client.execute("rm -rf /var/lib/cloud/seed/nocloud-net")
-+ client.execute("cloud-init clean --logs --reboot")
-+
-+
-+@pytest.mark.ubuntu
-+@pytest.mark.lxd_container
-+def test_wait_when_no_datasource(session_cloud: IntegrationCloud, setup_image):
-+ """Ensure that when no datasource is found, we get status: disabled
-+
-+ LP: #1966085
-+ """
-+ with session_cloud.launch(
-+ launch_kwargs={
-+            # On Jammy and above, the LXD datasource is detected via a
-+            # socket exposed to the container. Disabling security.devlxd
-+            # below prevents that socket from being exposed, so datasource
-+            # detection fails and ds-identify finds no datasource
-+ "config_dict": {"security.devlxd": False},
-+ "wait": False, # to prevent cloud-init status --wait
-+ }
-+ ) as client:
-+ # We know this will be an LXD instance due to our pytest mark
-+ client.instance.execute_via_ssh = False # type: ignore
-+ # No ubuntu user if cloud-init didn't run
-+ client.instance.username = "root"
-+ # Jammy and above will use LXD datasource by default
-+ if ImageSpecification.from_os_image().release in [
-+ "bionic",
-+ "focal",
-+ "impish",
-+ ]:
-+ _remove_nocloud_dir_and_reboot(client)
-+ status_out = _wait_for_cloud_init(client).stdout.strip()
-+ assert "status: disabled" in status_out
-+ assert "Cloud-init disabled by cloud-init-generator" in status_out
-+ assert client.execute("cloud-init status --wait").ok
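
The behavior this test locks in can also be reproduced by hand on an instance
where no datasource is found (a sketch; the strings match the asserts above):

    cloud-init status --long    # output contains "status: disabled" and
                                # "Cloud-init disabled by cloud-init-generator"
    cloud-init status --wait    # exits successfully instead of hanging
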
diff --git a/debian/patches/series b/debian/patches/series
deleted file mode 100644
index 40be7ad2..00000000
--- a/debian/patches/series
+++ /dev/null
@@ -1,5 +0,0 @@
-cpick-5e347d25-Revert-Ensure-system_cfg-read-before-ds-net-config-on
-cpick-eee60329-Fix-cloud-init-status-wait-when-no-datasource-found
-cpick-be9389c6-Work-around-bug-in-LXD-VM-detection-1325
-cpick-30ccd51a-ds-identify-also-discover-LXD-by-presence-from-DMI
-cpick-e3307e4d-ds-identify-detect-LXD-for-VMs-launched-from-host-with
diff --git a/doc-requirements.txt b/doc-requirements.txt
index 5bcac862..38c943e1 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -1,9 +1,10 @@
doc8
-m2r
-sphinx<2
-sphinx_rtd_theme
+m2r2
+sphinx==4.3.0
+sphinx_rtd_theme==1.0.0
pyyaml
+sphinx-panels
# Indirect dependencies
-docutils<0.18
-mistune<2.0.0 # https://github.com/miyakogi/m2r/issues/66
+jinja2<3.1.0 # https://github.com/readthedocs/readthedocs.org/issues/9037
+docutils==0.16 # https://github.com/readthedocs/sphinx_rtd_theme/issues/1115
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index 39f546e1..efeae625 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -35,7 +35,7 @@ apt_pipelining: False
#
# Default: none
#
-# if packages are specified, this apt_update will be set to true
+# if packages are specified, then package_update will be set to true
packages: ['pastebinit']
diff --git a/doc/examples/cloud-config-archive.txt b/doc/examples/cloud-config-archive.txt
index 23b1024c..f8a3d778 100644
--- a/doc/examples/cloud-config-archive.txt
+++ b/doc/examples/cloud-config-archive.txt
@@ -9,8 +9,5 @@
multi line payload
here
-
- type: text/upstart-job
- filename: my-upstart.conf
- content: |
- whats this, yo?
-
+ type: text/cloud-config
+ content: '#cloud-config\n\n password: gocubs'
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index 08cf5d8b..cdd176d3 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -7,7 +7,7 @@
# (Not implemented yet, but provided for future documentation)
disk_setup:
- ephmeral0:
+ ephemeral0:
table_type: 'mbr'
layout: True
overwrite: False
@@ -78,7 +78,7 @@ fs_setup:
# The disk_setup directive instructs Cloud-init to partition a disk. The format is:
disk_setup:
- ephmeral0:
+ ephemeral0:
table_type: 'mbr'
layout: true
/dev/xvdh:
diff --git a/doc/examples/cloud-config-final-message.txt b/doc/examples/cloud-config-final-message.txt
deleted file mode 100644
index 0ce31467..00000000
--- a/doc/examples/cloud-config-final-message.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-#cloud-config
-
-# final_message
-# default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds
-# this message is written by cloud-final when the system is finished
-# its first boot
-final_message: "The system is finally up, after $UPTIME seconds"
diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt
deleted file mode 100644
index 393d5164..00000000
--- a/doc/examples/cloud-config-growpart.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-#cloud-config
-#
-# growpart entry is a dict, if it is not present at all
-# in config, then the default is used ({'mode': 'auto', 'devices': ['/']})
-#
-# mode:
-# values:
-# * auto: use any option possible (any available)
-# if none are available, do not warn, but debug.
-# * growpart: use growpart to grow partitions
-# if growpart is not available, this is an error.
-# * off, false
-#
-# devices:
-# a list of things to resize.
-# items can be filesystem paths or devices (in /dev)
-# examples:
-# devices: [/, /dev/vdb1]
-#
-# ignore_growroot_disabled:
-# a boolean, default is false.
-# if the file /etc/growroot-disabled exists, then cloud-init will not grow
-# the root partition. This is to allow a single file to disable both
-# cloud-initramfs-growroot and cloud-init's growroot support.
-#
-# true indicates that /etc/growroot-disabled should be ignored
-#
-growpart:
- mode: auto
- devices: ['/']
- ignore_growroot_disabled: false
diff --git a/doc/examples/cloud-config-install-packages.txt b/doc/examples/cloud-config-install-packages.txt
index 7b90d7df..8bd9b74f 100644
--- a/doc/examples/cloud-config-install-packages.txt
+++ b/doc/examples/cloud-config-install-packages.txt
@@ -4,7 +4,7 @@
#
# Default: none
#
-# if packages are specified, this apt_update will be set to true
+# if packages are specified, then package_update will be set to true
#
# packages may be supplied as a single package name or as a list
# with the format [<package>, <version>] wherein the specific
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
deleted file mode 100644
index b76bf028..00000000
--- a/doc/examples/cloud-config-landscape.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-#cloud-config
-# Landscape-client configuration
-#
-# Anything under the top 'landscape: client' entry
-# will basically be rendered into a ConfigObj-formatted file
-# under the '[client]' section of /etc/landscape/client.conf
-#
-# Note: 'tags' should be specified as a comma delimited string
-# rather than a list.
-#
-# You can get example key/values by running 'landscape-config',
-# answering the questions, then looking at /etc/landscape/client.conf
-landscape:
- client:
- url: "https://landscape.canonical.com/message-system"
- ping_url: "http://landscape.canonical.com/ping"
- data_path: "/var/lib/landscape/client"
- http_proxy: "http://my.proxy.com/foobar"
- tags: "server,cloud"
- computer_title: footitle
- https_proxy: fooproxy
- registration_key: fookey
- account_name: fooaccount
diff --git a/doc/examples/cloud-config-launch-index.txt b/doc/examples/cloud-config-launch-index.txt
index e7dfdc0c..bbae03a7 100644
--- a/doc/examples/cloud-config-launch-index.txt
+++ b/doc/examples/cloud-config-launch-index.txt
@@ -7,16 +7,15 @@
# index (and not other launches) by providing a key here which
# will act as a filter on the instances userdata. When
# this key is left out (or non-integer) then the content
-# of this file will always be used for all launch-indexes
+# of this file will always be used for all launch-indexes
# (ie the previous behavior).
launch-index: 5
# Upgrade the instance on first boot
-# (ie run apt-get upgrade)
#
# Default: false
#
-apt_upgrade: true
+package_upgrade: true
# Other yaml keys below...
# .......
diff --git a/doc/examples/cloud-config-mcollective.txt b/doc/examples/cloud-config-mcollective.txt
deleted file mode 100644
index a701616a..00000000
--- a/doc/examples/cloud-config-mcollective.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically setup and run mcollective
-# when the instance boots for the first time.
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-mcollective:
- # Every key present in the conf object will be added to server.cfg:
- # key: value
- #
- # For example the configuration below will have the following key
- # added to server.cfg:
- # plugin.stomp.host: dbhost
- conf:
- plugin.stomp.host: dbhost
- # This will add ssl certs to mcollective
- # WARNING WARNING WARNING
- # The ec2 metadata service is a network service, and thus is readable
- # by non-root users on the system (ie: 'ec2metadata --user-data')
- # If you want security for this, please use include-once + SSL urls
- public-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
- private-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
-
diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt
index d0ad8383..2f45fd4d 100644
--- a/doc/examples/cloud-config-mount-points.txt
+++ b/doc/examples/cloud-config-mount-points.txt
@@ -43,4 +43,4 @@ mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ]
swap:
filename: /swap.img
size: "auto" # or size in bytes
- maxsize: size in bytes
+ maxsize: 10485760 # size in bytes
diff --git a/doc/examples/cloud-config-phone-home.txt b/doc/examples/cloud-config-phone-home.txt
deleted file mode 100644
index b30c14e3..00000000
--- a/doc/examples/cloud-config-phone-home.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-#cloud-config
-
-# phone_home: if this dictionary is present, then the phone_home
-# cloud-config module will post specified data back to the given
-# url
-# default: none
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE/
-# post: all
-# tries: 10
-#
-phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt
deleted file mode 100644
index 0bbb10e2..00000000
--- a/doc/examples/cloud-config-power-state.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-#cloud-config
-
-## poweroff or reboot system after finished
-# default: none
-#
-# power_state can be used to make the system shutdown, reboot or
-# halt after boot is finished. This same thing can be achieved by
-# user-data scripts or by runcmd by simply invoking 'shutdown'.
-#
-# Doing it this way ensures that cloud-init is entirely finished with
-# modules that would be executed, and avoids any error/log messages
-# that may go to the console as a result of system services like
-# syslog being taken down while cloud-init is running.
-#
-# If you delay '+5' (5 minutes) and have a timeout of
-# 120 (2 minutes), then the max time until shutdown will be 7 minutes.
-# cloud-init will invoke 'shutdown +5' after the process finishes, or
-# when 'timeout' seconds have elapsed.
-#
-# delay: form accepted by shutdown. default is 'now'. other format
-# accepted is '+m' (m in minutes)
-# mode: required. must be one of 'poweroff', 'halt', 'reboot'
-# message: provided as the message argument to 'shutdown'. default is none.
-# timeout: the amount of time to give the cloud-init process to finish
-# before executing shutdown.
-# condition: apply state change only if condition is met.
-# May be boolean True (always met), or False (never met),
-# or a command string or list to be executed.
-# command's exit code indicates:
-# 0: condition met
-# 1: condition not met
-# other exit codes will result in 'not met', but are reserved
-# for future use.
-#
-power_state:
- delay: "+30"
- mode: poweroff
- message: Bye Bye
- timeout: 30
- condition: True
diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt
deleted file mode 100644
index c6bc15de..00000000
--- a/doc/examples/cloud-config-puppet.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically setup and run puppet
-# when the instance boots for the first time.
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-puppet:
- # Boolean: whether or not to install puppet (default: true)
- install: true
-
- # A specific version to pass to the installer script or package manager
- version: "7.7.0"
-
- # Valid values are 'packages' and 'aio' (default: 'packages')
- install_type: "packages"
-
- # Puppet collection to install if 'install_type' is 'aio'
- collection: "puppet7"
-
- # Boolean: whether or not to remove the puppetlabs repo after installation
- # if 'install_type' is 'aio' (default: true)
- cleanup: true
-
- # If 'install_type' is 'aio', change the url to the install script
- aio_install_url: "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh"
-
- # Path to the puppet config file (default: depends on 'install_type')
- conf_file: "/etc/puppet/puppet.conf"
-
- # Path to the puppet SSL directory (default: depends on 'install_type')
- ssl_dir: "/var/lib/puppet/ssl"
-
- # Path to the CSR attributes file (default: depends on 'install_type')
- csr_attributes_path: "/etc/puppet/csr_attributes.yaml"
-
- # The name of the puppet package to install (no-op if 'install_type' is 'aio')
- package_name: "puppet"
-
- # Boolean: whether or not to run puppet after configuration finishes
- # (default: false)
- exec: false
-
- # A list of arguments to pass to 'puppet agent' if 'exec' is true
- # (default: ['--test'])
- exec_args: ['--test']
-
- # Every key present in the conf object will be added to puppet.conf:
- # [name]
- # subkey=value
- #
- # For example the configuration below will have the following section
- # added to puppet.conf:
- # [main]
- # server=puppetserver.example.org
- # certname=i-0123456.ip-X-Y-Z.cloud.internal
- #
- # The puppetserver ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem if using distro packages
- # or /etc/puppetlabs/puppet/ssl/certs/ca.pem if using AIO packages.
- conf:
- agent:
- server: "puppetserver.example.org"
- # certname supports substitutions at runtime:
- # %i: instanceid
- # Example: i-0123456
- # %f: fqdn of the machine
- # Example: ip-X-Y-Z.cloud.internal
- #
- # NB: the certname will automatically be lowercased as required by puppet
- certname: "%i.%f"
- # ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetserver certificate in pem format.
- # It should be a multi-line string (using the | yaml notation for
- # multi-line strings).
- # The puppetserver certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host if using
- # distro packages or /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem if using AIO
- # packages.
- #
- ca_cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
diff --git a/doc/examples/cloud-config-resolv-conf.txt b/doc/examples/cloud-config-resolv-conf.txt
deleted file mode 100644
index c4843f54..00000000
--- a/doc/examples/cloud-config-resolv-conf.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically configure resolv.conf when the
-# instance boots for the first time.
-#
-# Ensure that your yaml is valid and pass this as user-data when starting
-# the instance. Also be sure that your cloud.cfg file includes this
-# configuration module in the appropriate section.
-#
-manage_resolv_conf: true
-
-resolv_conf:
- nameservers: ['8.8.4.4', '8.8.8.8']
- searchdomains:
- - foo.example.com
- - bar.example.com
- domain: example.com
- options:
- rotate: true
- timeout: 1
diff --git a/doc/examples/cloud-config-rh_subscription.txt b/doc/examples/cloud-config-rh_subscription.txt
deleted file mode 100644
index 5cc903a2..00000000
--- a/doc/examples/cloud-config-rh_subscription.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-#cloud-config
-
-# register your Red Hat Enterprise Linux based operating system
-#
-# this cloud-init plugin is capable of registering by username
-# and password *or* activation key and org. Following a successful
-# registration you can:
-# - auto-attach subscriptions
-# - set the service level
-# - add subscriptions based on its pool ID
-# - enable yum repositories based on its repo id
-# - disable yum repositories based on its repo id
-# - alter the rhsm_baseurl and server-hostname in the
-# /etc/rhsm/rhsm.conf file
-
-rh_subscription:
- username: joe@foo.bar
-
- ## Quote your password if it has symbols to be safe
- password: '1234abcd'
-
- ## If you prefer, you can use the activation key and
- ## org instead of username and password. Be sure to
- ## comment out username and password
-
- #activation-key: foobar
- #org: 12345
-
- ## Uncomment to auto-attach subscriptions to your system
- #auto-attach: True
-
- ## Uncomment to set the service level for your
- ## subscriptions
- #service-level: self-support
-
- ## Uncomment to add pools (needs to be a list of IDs)
- #add-pool: []
-
- ## Uncomment to add or remove yum repos
- ## (needs to be a list of repo IDs)
- #enable-repo: []
- #disable-repo: []
-
- ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf
- #rhsm-baseurl: http://url
-
- ## Uncomment to alter the server hostname in
- ## /etc/rhsm/rhsm.conf
- #server-hostname: foo.bar.com
diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt
deleted file mode 100644
index d28dd38e..00000000
--- a/doc/examples/cloud-config-rsyslog.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-#cloud-config
-## the rsyslog module allows you to configure the system's syslog.
-## configuration of syslog is under the top level cloud-config
-## entry 'rsyslog'.
-##
-## Example:
-#cloud-config
-rsyslog:
- remotes:
- # udp to host 'maas.mydomain' port 514
- maashost: maas.mydomain
- # udp to ipv4 host on port 514
- maas: "@[10.5.1.56]:514"
-    # tcp to an ipv6 host on port 555
- maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- config_dir: /etc/rsyslog.d
- config_filename: 20-cloud-config.conf
- service_reload_command: [your, syslog, reload, command]
-
-## Additionally the following legacy format is supported
-## it is converted into the format above before use.
-## rsyslog_filename -> rsyslog/config_filename
-## rsyslog_dir -> rsyslog/config_dir
-## rsyslog -> rsyslog/configs
-# rsyslog:
-# - "*.* @@192.158.1.1"
-# - content: "*.* @@192.0.2.1:10514"
-# filename: 01-example.conf
-# - content: |
-# *.* @@syslogd.example.com
-# rsyslog_filename: 20-cloud-config.conf
-# rsyslog_dir: /etc/rsyslog.d
-
-## to configure rsyslog to accept remote logging on Ubuntu
-## write the following into /etc/rsyslog.d/20-remote-udp.conf
-## $ModLoad imudp
-## $UDPServerRun 514
-## $template LogRemote,"/var/log/maas/rsyslog/%HOSTNAME%/messages"
-## :fromhost-ip, !isequal, "127.0.0.1" ?LogRemote
-## then:
-## sudo service rsyslog restart
diff --git a/doc/examples/cloud-config-run-cmds.txt b/doc/examples/cloud-config-run-cmds.txt
index 002398f5..7a8a0ea1 100644
--- a/doc/examples/cloud-config-run-cmds.txt
+++ b/doc/examples/cloud-config-run-cmds.txt
@@ -16,7 +16,7 @@
runcmd:
- [ ls, -l, / ]
- [ sh, -xc, "echo $(date) ': hello world!'" ]
- - [ sh, -c, echo "=========hello world'=========" ]
+ - [ sh, -c, echo "=========hello world=========" ]
- ls -l /root
# Note: Don't write files to /tmp from cloud-init use /run/somedir instead.
# Early boot environments can race systemd-tmpfiles-clean LP: #1707222.
diff --git a/doc/examples/cloud-config-salt-minion.txt b/doc/examples/cloud-config-salt-minion.txt
deleted file mode 100644
index 939fdc8b..00000000
--- a/doc/examples/cloud-config-salt-minion.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-#cloud-config
-#
-# This is an example file to automatically setup and run a salt
-# minion when the instance boots for the first time.
-# Make sure that this file is valid yaml before starting instances.
-# It should be passed as user-data when starting the instance.
-
-salt_minion:
- # conf contains all the directives to be assigned in /etc/salt/minion.
-
- conf:
- # Set the location of the salt master server, if the master server cannot be
- # resolved, then the minion will fail to start.
-
- master: salt.example.com
-
- # Salt keys are manually generated by: salt-key --gen-keys=GEN_KEYS,
- # where GEN_KEYS is the name of the keypair, e.g. 'minion'. The keypair
- # will be copied to /etc/salt/pki on the minion instance.
-
- public_key: |
- -----BEGIN PUBLIC KEY-----
- MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEAwI4yqk1Y12zVmu9Ejlua
- h2FD6kjrt+N9XfGqZUUVNeRb7CA0Sj5Q6NtgoaiXuIrSea2sLda6ivqAGmtxMMrP
- zpf3FwsYWxBUNF7D4YeLmYjvcTbfr3bCOIRnPNXZ+4isuvvEiM02u2cO0okZSgeb
- dofNa1NbTLYAQr9jZZb7GPKrTO4CKy0xzBih/A+sl6dL9PNDmqXQEjyJS6PXG1Vj
- PvD5jpSrxuIl5Ms/+2Ro3ALgvC8dgoY/3m3csnd06afumGKv5YOGtf+bnWLhc0bf
- 6Sk8Q6i5t0Bl+HAULSPr+B9x/I0rN76ZnPvTj1+hJ0zTof4d0hOLx/K5OQyt7AKo
- 4wIBAQ==
- -----END PUBLIC KEY-----
-
- private_key: |
- -----BEGIN RSA PRIVATE KEY-----
- Proc-Type: 4,ENCRYPTED
- DEK-Info: AES-128-CBC,ECE30DBBA56E2DF06B7BC415F8870994
-
- YQOE5HIsghqjRsxPQqiWMH/VHmyFH6xIpBcmzxzispEHwBojlvLXviwvR66YhgNw
- 7smwE10Ik4/cwwiHTZqCk++jPATPygBiqQkUijCWzcT9kfaxmqdP4PL+hu9g7kGC
- KrD2Bm8/oO08s957aThuHC1sABRcJ1V3FRzJT6Za4fwweyvHVYRnmgaDA6zH0qV8
- NqBSB2hnNXKEdh6UFz9QGcrQxnRjfdIaW64zoEX7jT7gYYL7FkGXBa3XdMOA4fnl
- adRwLFMs0jfilisZv8oUbPdZ6J6x3o8p8LVecCF8tdZt1zkcLSIXKnoDFpHSISGs
- BD9aqD+E4ejynM/tPaVFq4IHzT8viN6h6WcH8fbpClFZ66Iyy9XL3/CjAY7Jzhh9
- fnbc4Iq28cdbmO/vkR7JyVOgEMWe1BcSqtro70XoUNRY8uDJUPqohrhm/9AigFRA
- Pwyf3LqojxRnwXjHsZtGltUtEAPZzgh3fKJnx9MyRR7DPXBRig7TAHU7n2BFRhHA
- TYThy29bK6NkIc/cKc2kEQVo98Cr04PO8jVxZM332FlhiVlP0kpAp+tFj7aMzPTG
- sJumb9kPbMsgpEuTCONm3yyoufGEBFMrIJ+Po48M2RlYOh50VkO09pI+Eu7FPtVB
- H4gKzoJIpZZ/7vYXQ3djM8s9hc5gD5CVExTZV4drbsXt6ITiwHuxZ6CNHRBPL5AY
- wmF8QZz4oivv1afdSe6E6OGC3uVmX3Psn5CVq2pE8VlRDKFy1WqfU2enRAijSS2B
- rtJs263fOJ8ZntDzMVMPgiAlzzfA285KUletpAeUmz+peR1gNzkE0eKSG6THOCi0
- rfmR8SeEzyNvin0wQ3qgYiiHjHbbFhJIMAQxoX+0hDSooM7Wo5wkLREULpGuesTg
- A6Fe3CiOivMDraNGA7H6Yg==
- -----END RSA PRIVATE KEY-----
-
diff --git a/doc/examples/cloud-config-seed-random.txt b/doc/examples/cloud-config-seed-random.txt
deleted file mode 100644
index 142b10cd..00000000
--- a/doc/examples/cloud-config-seed-random.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-#cloud-config
-#
-# random_seed is a dictionary.
-#
-# The config module will write seed data from the datasource
-# to 'file' described below.
-#
-# Entries in this dictionary are:
-# file: the file to write random data to (default is /dev/urandom)
-# data: this data will be written to 'file' before data from
-# the datasource
-# encoding: this will be used to decode 'data' provided.
-#             allowed values are 'encoding', 'raw', 'base64', 'b64',
-# 'gzip', or 'gz'. Default is 'raw'
-#
-# command: execute this command to seed random.
-# the command will have RANDOM_SEED_FILE in its environment
-# set to the value of 'file' above.
-# command_required: default False
-# if true, and 'command' is not available to be run
-#                      then an exception is raised and cloud-init will record failure.
-#                      Otherwise, only a debug message is logged.
-#
-# Note: command could be ['pollinate',
-# '--server=http://local.pollinate.server']
-# which would have pollinate populate /dev/urandom from provided server
-random_seed:
- file: '/dev/urandom'
- data: 'my random string'
- encoding: 'raw'
- command: ['sh', '-c', 'dd if=/dev/urandom of=$RANDOM_SEED_FILE']
- command_required: True
diff --git a/doc/examples/cloud-config-update-apt.txt b/doc/examples/cloud-config-update-apt.txt
index aaa47326..9191aad7 100644
--- a/doc/examples/cloud-config-update-apt.txt
+++ b/doc/examples/cloud-config-update-apt.txt
@@ -4,5 +4,4 @@
# update will be done independent of this setting.
#
# Default: false
-# Aliases: apt_update
package_update: true
diff --git a/doc/examples/cloud-config-update-packages.txt b/doc/examples/cloud-config-update-packages.txt
index 56b72c63..f8d42a53 100644
--- a/doc/examples/cloud-config-update-packages.txt
+++ b/doc/examples/cloud-config-update-packages.txt
@@ -1,8 +1,6 @@
#cloud-config
# Upgrade the instance on first boot
-# (ie run apt-get upgrade)
#
# Default: false
-# Aliases: apt_upgrade
package_upgrade: true
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index eaa8dd24..a66b0d75 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -3,7 +3,7 @@
# The following example adds the ubuntu group with members 'root' and 'sys'
# and the empty group cloud-users.
groups:
- - ubuntu: [root,sys]
+ - admingroup: [root,sys]
- cloud-users
# Add users to the system. Users are added after groups are added.
@@ -34,17 +34,16 @@ users:
- gh:TheRealFalcon
lock_passwd: true
ssh_authorized_keys:
- - <ssh pub key 1>
- - <ssh pub key 2>
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB csmith@fringe
- name: cloudy
gecos: Magic Cloud App Daemon User
inactive: '5'
system: true
- name: fizzbuzz
- sudo: False
+ sudo: false
+ shell: /bin/bash
ssh_authorized_keys:
- - <ssh pub key 1>
- - <ssh pub key 2>
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB csmith@fringe
- snapuser: joe@joeuser.io
- name: nosshlogins
ssh_redirect_user: true
@@ -64,7 +63,7 @@ users:
# lock_passwd: Defaults to true. Lock the password to disable password login
# inactive: Number of days after password expires until account is disabled
# passwd: The hash -- not the password itself -- of the password you want
-# to use for this user. You can generate a safe hash via:
+# to use for this user. You can generate a hash via:
# mkpasswd --method=SHA-512 --rounds=4096
# (the above command would create from stdin an SHA-512 password hash
# with 4096 salt rounds)
@@ -81,14 +80,15 @@ users:
# In other words, this feature is a potential security risk and is
# provided for your convenience only. If you do not fully trust the
# medium over which your cloud-config will be transmitted, then you
-# should use SSH authentication only.
+# should not use this feature.
#
-# You have thus been warned.
# no_create_home: When set to true, do not create home directory.
# no_user_group: When set to true, do not create a group named after the user.
# no_log_init: When set to true, do not initialize lastlog and faillog database.
# ssh_import_id: Optional. Import SSH ids
# ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file
+# An error will be raised if no_create_home or system is
+# also set.
# ssh_redirect_user: Optional. [bool] Set true to block ssh logins for cloud
# ssh public keys and emit a message redirecting logins to
# use <default_username> instead. This option only disables cloud
diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt
deleted file mode 100644
index 920d12e8..00000000
--- a/doc/examples/cloud-config-vendor-data.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-#cloud-config
-#
-# This explains how to control vendordata via a cloud-config
-#
-# On select datasources, vendors have a channel for the consumption
-# of all supported user-data types, via a special channel called
-# vendordata. Users of the end system are given ultimate control.
-#
-vendor_data:
- enabled: True
- prefix: /usr/bin/ltrace
-
-# enabled: whether it is enabled or not
-# prefix: the command to run before any vendor scripts.
-# Note: this is a fairly weak method of containment. It should
-# be used to profile a script, not to prevent its run
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index a2b4a3fa..177c5600 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -3,19 +3,15 @@
# (ie run apt-get update)
#
# Default: true
-# Aliases: apt_update
package_update: false
# Upgrade the instance on first boot
-# (ie run apt-get upgrade)
#
# Default: false
-# Aliases: apt_upgrade
package_upgrade: true
# Reboot after package install/update if necessary
# Default: false
-# Aliases: apt_reboot_if_required
package_reboot_if_required: true
# For 'apt' specific config, see cloud-config-apt.txt
@@ -165,7 +161,7 @@ bootcmd:
# where 'frequency' is one of:
# once-per-instance
# always
-# a python file in the CloudConfig/ module directory named
+# a python file in the CloudConfig/ module directory named
# cc_<name>.py
# example:
cloud_config_modules:
@@ -257,21 +253,6 @@ locale: en_US.UTF-8
# render template default-locale.tmpl to locale_configfile
locale_configfile: /etc/default/locale
-# add entries to rsyslog configuration
-# The first occurrence of a given filename will truncate.
-# subsequent entries will append.
-# if value is a scalar, its content is assumed to be 'content', and the
-# default filename is used.
-# if filename is not provided, it will default to 'rsyslog_filename'
-# if filename does not start with a '/', it will be put in 'rsyslog_dir'
-# rsyslog_dir default: /etc/rsyslog.d
-# rsyslog_filename default: 20-cloud-config.conf
-rsyslog:
- - ':syslogtag, isequal, "[CLOUDINIT]" /var/log/cloud-foo.log'
- - content: "*.* @@192.0.2.1:10514"
- - filename: 01-examplecom.conf
- content: "*.* @@syslogd.example.com"
-
# resize_rootfs: should the / filesystem be resized on first boot
# this allows you to launch an instance with a larger disk / partition
# and have the instance automatically grow / to accommodate it
@@ -321,7 +302,7 @@ resize_rootfs: True
#
# == /etc/hosts management ==
#
-# The cloud-config variable that covers management of /etc/hosts is
+# The cloud-config variable that covers management of /etc/hosts is
# 'manage_etc_hosts'
#
# By default, its value is 'false' (boolean False)
@@ -330,29 +311,29 @@ resize_rootfs: True
# default: false
#
# false:
-# cloud-init will not modify /etc/hosts at all.
+# cloud-init will not modify /etc/hosts at all.
# * Whatever is present at instance boot time will be present after boot.
# * User changes will not be overwritten
#
-# true or 'template':
-# on every boot, /etc/hosts will be re-written from
+# true:
+# on every boot, /etc/hosts will be re-written from
# /etc/cloud/templates/hosts.tmpl.
# The strings '$hostname' and '$fqdn' are replaced in the template
# with the appropriate values.
-# To make modifications persistent across a reboot, you must make
+# To make modifications persistent across a reboot, you must make
# modifications to /etc/cloud/templates/hosts.tmpl
#
# localhost:
# This option ensures that an entry is present for fqdn as described in
-# section 5.1.2 of the debian manual
+# section 5.1.2 of the debian manual
# http://www.debian.org/doc/manuals/debian-reference/ch05.en.html
#
-# cloud-init will generally own the 127.0.1.1 entry, and will update
+# cloud-init will generally own the 127.0.1.1 entry, and will update
# it to the hostname and fqdn on every boot. All other entries will
# be left as is. 'ping `hostname`' will ping 127.0.1.1
#
# If you want a fqdn entry with aliases other than 'hostname' to resolve
-# to a localhost interface, you'll need to use something other than
+# to a localhost interface, you'll need to use something other than
# 127.0.1.1. For example:
# 127.0.1.2 myhost.fqdn.example.com myhost whatup.example.com
@@ -366,7 +347,7 @@ final_message: "The system is finally up, after $UPTIME seconds"
# configure where output will go
# 'output' entry is a dict with 'init', 'config', 'final' or 'all'
-# entries. Each one defines where
+# entries. Each one defines where
# cloud-init, cloud-config, cloud-config-final or all output will go
# each entry in the dict can be a string, list or dict.
# if it is a string, it refers to stdout and stderr
@@ -401,7 +382,7 @@ phone_home:
timezone: US/Eastern
# def_log_file and syslog_fix_perms work together
-# if
+# if
# - logging is set to go to a log file 'L' both with and without syslog
# - and 'L' does not exist
# - and syslog is configured to write to 'L'
@@ -474,17 +455,6 @@ ssh_pwauth: True
# default is False
manual_cache_clean: False
-# When cloud-init is finished running including having run
-# cloud_init_modules, then it will run this command. The default
-# is to emit an upstart signal as shown below. If the value is a
-# list, it will be passed to Popen. If it is a string, it will be
-# invoked through 'sh -c'.
-#
-# default value:
-# cc_ready_cmd: [ initctl, emit, cloud-config, CLOUD_CFG=/var/lib/instance//cloud-config.txt ]
-# example:
-# cc_ready_cmd: [ sh, -c, 'echo HI MOM > /tmp/file' ]
-
## configure interaction with ssh server
# ssh_svcname: ssh
# set the name of the option to 'service restart'
diff --git a/doc/examples/part-handler-v2.txt b/doc/examples/part-handler-v2.txt
index 554c34a5..b556a45a 100644
--- a/doc/examples/part-handler-v2.txt
+++ b/doc/examples/part-handler-v2.txt
@@ -16,7 +16,7 @@ def list_types():
# return a list of mime-types that are handled by this module
return(["text/plain", "text/go-cubs-go"])
-def handle_part(data,ctype,filename,payload,frequency):
+def handle_part(data, ctype, filename, payload, frequency):
# data: the cloudinit object
# ctype: '__begin__', '__end__', or the specific mime-type of the part
# filename: the filename for the part, or dynamically generated part if
@@ -27,12 +27,12 @@ def handle_part(data,ctype,filename,payload,frequency):
# will be invoked only on the first boot. 'always' will
# will be called on subsequent boots.
if ctype == "__begin__":
- print "my handler is beginning, frequency=%s" % frequency
+ print(f"my handler is beginning, frequency={frequency}")
return
if ctype == "__end__":
- print "my handler is ending, frequency=%s" % frequency
+ print(f"my handler is ending, frequency={frequency}")
return
- print "==== received ctype=%s filename=%s ====" % (ctype,filename)
- print payload
- print "==== end ctype=%s filename=%s" % (ctype, filename)
+ print(f"==== received ctype={ctype} filename={filename} ====")
+ print(payload)
+ print(f"==== end ctype={ctype} filename={filename}")
diff --git a/doc/examples/part-handler.txt b/doc/examples/part-handler.txt
index 1484e1a0..7cc356f6 100644
--- a/doc/examples/part-handler.txt
+++ b/doc/examples/part-handler.txt
@@ -4,19 +4,19 @@ def list_types():
# return a list of mime-types that are handled by this module
return(["text/plain", "text/go-cubs-go"])
-def handle_part(data,ctype,filename,payload):
+def handle_part(data, ctype, filename, payload):
# data: the cloudinit object
# ctype: '__begin__', '__end__', or the specific mime-type of the part
# filename: the filename for the part, or a dynamically generated name
#   if no filename attribute is given
# payload: the content of the part (empty for begin or end)
if ctype == "__begin__":
- print "my handler is beginning"
+ print("my handler is beginning")
return
if ctype == "__end__":
- print "my handler is ending"
+ print("my handler is ending")
return
- print "==== received ctype=%s filename=%s ====" % (ctype,filename)
- print payload
- print "==== end ctype=%s filename=%s" % (ctype, filename)
+ print(f"==== received ctype={ctype} filename={filename} ====")
+ print(payload)
+ print(f"==== end ctype={ctype} filename={filename}")
diff --git a/doc/examples/upstart-cloud-config.txt b/doc/examples/upstart-cloud-config.txt
deleted file mode 100644
index 1fcec34d..00000000
--- a/doc/examples/upstart-cloud-config.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-#upstart-job
-description "My test job"
-
-start on cloud-config
-console output
-task
-
-script
-echo "====BEGIN======="
-echo "HELLO WORLD: $UPSTART_JOB"
-echo "=====END========"
-end script
diff --git a/doc/examples/upstart-rclocal.txt b/doc/examples/upstart-rclocal.txt
deleted file mode 100644
index 5cd049a9..00000000
--- a/doc/examples/upstart-rclocal.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-#upstart-job
-description "a test upstart job"
-
-start on stopped rc RUNLEVEL=[2345]
-console output
-task
-
-script
-echo "====BEGIN======="
-echo "HELLO RC.LOCAL LIKE WORLD: $UPSTART_JOB"
-echo "=====END========"
-end script
diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1
index 2cb63135..388617de 100644
--- a/doc/man/cloud-init.1
+++ b/doc/man/cloud-init.1
@@ -4,8 +4,7 @@
cloud-init \- Cloud instance initialization
.SH SYNOPSIS
-.BR "cloud-init" " [-h] [-d] [-f FILES] [--force] [-v]
-{init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}"
+.BR "cloud-init" " [-h] [-d] [-f FILES] [--force] [-v] [SUBCOMMAND]"
.SH DESCRIPTION
Cloud-init provides a mechanism for cloud instance initialization.
@@ -20,23 +19,23 @@ debug of deployments.
.SH OPTIONS
.TP
.B "-h, --help"
-Show help message and exit
+Show help message and exit.
.TP
.B "-d, --debug"
-Show additional pre-action logging (default: False)
+Show additional pre-action logging (default: False).
.TP
.B "-f <files>, --files <files>"
-Additional YAML configuration files to use
+Use additional YAML configuration files.
.TP
.B "--force"
-Force running even if no datasource is found (use at your own risk)
+Force running even if no datasource is found (use at your own risk).
.TP
.B "-v, --version"
-Show program's version number and exit
+Show program's version number and exit.
.SH SUBCOMMANDS
Please see the help output for each subcommand for additional details,
@@ -68,17 +67,21 @@ List defined features.
.TP
.B "init"
-Initializes cloud-init and performs initial modules.
+Initialize cloud-init and execute initial modules.
.TP
.B "modules"
-Activates modules using a given configuration key.
+Activate modules using a given configuration key.
.TP
.B "query"
Query standardized instance metadata from the command line.
.TP
+.B "schema"
+Validate cloud-config files using jsonschema.
+
+.TP
.B "single"
Run a single module.
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 9976afa4..1e9539a9 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -1,6 +1,8 @@
import os
import sys
+import sphinx_rtd_theme
+
from cloudinit import version
# If extensions (or modules to document with autodoc) are in another directory,
@@ -18,17 +20,19 @@ sys.path.insert(0, os.path.abspath("."))
# General information about the project.
project = "cloud-init"
-copyright = "2020, Canonical Ltd."
+copyright = "2022, Canonical Ltd."
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
+needs_sphinx = "4.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
- "m2r",
+ "m2r2",
+ "sphinx_rtd_theme",
+ "sphinx_panels",
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.viewcode",
@@ -66,3 +70,7 @@ html_theme = "sphinx_rtd_theme"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "static/logo.png"
+
+# Make sure the target is unique
+autosectionlabel_prefix_document = True
+autosectionlabel_maxdepth = 2
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 251a904d..1516a5cb 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -8,25 +8,20 @@ cross-platform cloud instance initialization. It is supported across all
major public cloud providers, provisioning systems for private cloud
infrastructure, and bare-metal installations.
-Cloud instances are initialized from a disk image and instance data:
-
-- Cloud metadata
-- User data (optional)
-- Vendor data (optional)
-
-Cloud-init will identify the cloud it is running on during boot, read any
-provided metadata from the cloud and initialize the system accordingly. This
-may involve setting up the network and storage devices to configuring SSH
-access key and many other aspects of a system. Later on the cloud-init will
-also parse and process any optional user or vendor data that was passed to the
-instance.
+On instance boot, cloud-init will identify the cloud it is running on, read
+any provided metadata from the cloud, and initialize the system accordingly.
+This may involve setting up the network and storage devices, configuring SSH
+access keys, and setting up many other aspects of a system. Later,
+cloud-init will parse and process any optional user or vendor data that was
+passed to the instance.
Getting help
************
Having trouble? We would like to help!
-- Try the :ref:`FAQ` – its got answers to some common questions
+- Check out the :ref:`lxd_tutorial` if you're new to cloud-init
+- Try the :ref:`FAQ` for answers to some common questions
- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
@@ -36,6 +31,7 @@ Having trouble? We would like to help!
:titlesonly:
:caption: Getting Started
+ topics/tutorial.rst
topics/availability.rst
topics/boot.rst
topics/cli.rst
@@ -69,6 +65,7 @@ Having trouble? We would like to help!
:caption: Development
topics/contributing.rst
+ topics/module_creation.rst
topics/code_review.rst
topics/security.rst
topics/debugging.rst
diff --git a/doc/rtd/topics/analyze.rst b/doc/rtd/topics/analyze.rst
index 709131b8..61213e28 100644
--- a/doc/rtd/topics/analyze.rst
+++ b/doc/rtd/topics/analyze.rst
@@ -100,7 +100,6 @@ execution.
00.00000s (modules-final/config-chef)
00.00000s (modules-config/config-snap_config)
00.00000s (modules-config/config-ntp)
- 00.00000s (modules-config/config-emit_upstart)
00.00000s (modules-config/config-disable-ec2-metadata)
00.00000s (init-network/setup-datasource)
@@ -138,7 +137,6 @@ The following is an abbreviated example of the show output:
Finished stage: (init-network) 02.72100 seconds
Starting stage: modules-config
- |`->config-emit_upstart ran successfully @15.43100s +00.00000s
|`->config-snap ran successfully @15.43100s +00.00100s
...
|`->config-runcmd ran successfully @16.22300s +00.00100s
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index b904eaf4..e0663760 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -158,7 +158,7 @@ scripts until cloud-init is done without having to write your own systemd
units dependency chains. See :ref:`cli_status` for more info.
First Boot Determination
-************************
+========================
cloud-init has to determine whether or not the current boot is the first boot
of a new instance or not, so that it applies the appropriate configuration. On
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index e2f48bf0..2e209bb4 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -9,33 +9,32 @@ option. This can be used against cloud-init itself or any of its subcommands.
.. code-block:: shell-session
$ cloud-init --help
- usage: /usr/bin/cloud-init [-h] [--version] [--file FILES] [--debug] [--force]
- {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
- ...
-
- optional arguments:
- -h, --help show this help message and exit
- --version, -v show program's version number and exit
- --file FILES, -f FILES
- additional yaml configuration files to use
- --debug, -d show additional pre-action logging (default: False)
- --force force running even if no datasource is found (use at
- your own risk)
+ usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force]
+ {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema} ...
+
+ options:
+ -h, --help show this help message and exit
+ --version, -v Show program's version number and exit.
+ --file FILES, -f FILES
+ Use additional yaml configuration files.
+ --debug, -d Show additional pre-action logging (default: False).
+ --force Force running even if no datasource is found (use at your own risk).
Subcommands:
- {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
- init initializes cloud-init and performs initial modules
- modules activates modules using a given configuration key
- single run a single module
- query Query standardized instance metadata from the command
- line.
+ {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema}
+ init Initialize cloud-init and perform initial modules.
+ modules Activate modules using a given configuration key.
+ single Run a single module.
+ query Query standardized instance metadata from the command line.
dhclient-hook Run the dhclient hook to record network info.
- features list defined features
- analyze Devel tool: Analyze cloud-init logs and data
- devel Run development tools
- collect-logs Collect and tar all cloud-init debug info
+ features List defined features.
+ analyze Devel tool: Analyze cloud-init logs and data.
+ devel Run development tools.
+ collect-logs Collect and tar all cloud-init debug info.
clean Remove logs and artifacts so cloud-init can re-run.
status Report cloud-init status or wait on completion.
+ schema Validate cloud-config files using jsonschema.
+
The rest of this document will give an overview of each of the subcommands.
@@ -66,8 +65,8 @@ clean
Remove cloud-init artifacts from ``/var/lib/cloud`` to simulate a clean
instance. On reboot, cloud-init will re-run all stages as it did on first boot.
-* *\\-\\-logs*: optionally remove all cloud-init log files in ``/var/log/``
-* *\\-\\-reboot*: reboot the system after removing artifacts
+* ``--logs``: optionally remove all cloud-init log files in ``/var/log/``
+* ``--reboot``: reboot the system after removing artifacts
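+
+For example, one illustrative invocation that removes all artifacts and logs
+and then reboots into what will behave like a first boot:
+
+.. code-block:: shell-session
+
+   $ sudo cloud-init clean --logs --reboot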
.. _cli_collect_logs:
@@ -114,11 +113,6 @@ Current subcommands:
from ``/run/cloud-init/instance-data.json``. It accepts a user-data file
containing the jinja template header ``## template: jinja`` and renders
that content with any instance-data.json variables present.
- * ``schema``: a **#cloud-config** format and schema
- validator. It accepts a cloud-config YAML file and annotates potential
- schema errors locally without the need for deployment. Schema
- validation is work in progress and supports a subset of cloud-config
- modules.
* ``hotplug-hook``: respond to newly added system devices by retrieving
updated system metadata and bringing up/down the corresponding device.
This command is intended to be called via a systemd service and is
@@ -152,7 +146,7 @@ Can be run on the commandline, but is generally gated to run only once
due to semaphores in ``/var/lib/cloud/instance/sem/`` and
``/var/lib/cloud/sem``.
-* *\\-\\-local*: run *init-local* stage instead of *init*
+* ``--local``: run *init-local* stage instead of *init*
.. _cli_modules:
@@ -173,8 +167,8 @@ declared to run in various boot stages in the file
Can be run on the command line, but each module is gated to run only once due
to semaphores in ``/var/lib/cloud/``.
-* *\\-\\-mode [init|config|final]*: run *modules:init*, *modules:config* or
- *modules:final* cloud-init stages. See :ref:`boot_stages` for more info.
+* ``--mode [init|config|final]``: run ``modules:init``, ``modules:config`` or
+  ``modules:final`` cloud-init stages. See :ref:`boot_stages` for more info.
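+
+For example, to run only the config-stage modules:
+
+.. code-block:: shell-session
+
+   $ sudo cloud-init modules --mode config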
.. _cli_query:
@@ -187,13 +181,13 @@ in ``/run/cloud-init/instance-data.json``. This is a convenience command-line
interface to reference any cached configuration metadata that cloud-init
crawls when booting the instance. See :ref:`instance_metadata` for more info.
-* *\\-\\-all*: dump all available instance data as json which can be queried
-* *\\-\\-instance-data*: optional path to a different instance-data.json file
+* ``--all``: dump all available instance data as json which can be queried
+* ``--instance-data``: optional path to a different instance-data.json file
to source for queries
-* *\\-\\-list-keys*: list available query keys from cached instance data
-* *\\-\\-format*: a string that will use jinja-template syntax to render a
+* ``--list-keys``: list available query keys from cached instance data
+* ``--format``: a string that will use jinja-template syntax to render a
  string, replacing each
-* *<varname>*: a dot-delimited variable path into the instance-data.json
+* ``<varname>``: a dot-delimited variable path into the instance-data.json
object
Below demonstrates how to list all top-level query keys that are standardized
@@ -249,6 +243,29 @@ This data can then be formatted to generate custom strings or data:
custom-i-0e91f69987f37ec74.us-east-2.aws.com
+.. _cli_schema:
+
+schema
+======
+
+Validate cloud-config files using jsonschema.
+
+* ``-h, --help``: show this help message and exit
+* ``-c CONFIG_FILE, --config-file CONFIG_FILE``: Path of the cloud-config yaml
+ file to validate
+* ``--system``: Validate the system cloud-config userdata
+* ``-d DOCS [DOCS ...], --docs DOCS [DOCS ...]``: Print schema module docs.
+ Choices: all or space-delimited cc_names.
+* ``--annotate``: Annotate existing cloud-config file with errors
+
+The following example checks a config file and annotates it with any
+errors on stdout.
+
+.. code-block:: shell-session
+
+ $ cloud-init schema -c ./config.yml --annotate
+
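+To print rendered module documentation, pass ``--docs`` with ``all`` or a
+space-delimited list of module names:
+
+.. code-block:: shell-session
+
+   $ cloud-init schema --docs all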
+
.. _cli_single:
single
@@ -256,8 +273,8 @@ single
Attempt to run a single named cloud config module.
-* *\\-\\-name*: the cloud-config module name to run
-* *\\-\\-frequency*: optionally override the declared module frequency
+* ``--name``: the cloud-config module name to run
+* ``--frequency``: optionally override the declared module frequency
with one of (always|once-per-instance|once)
The following example re-runs the cc_set_hostname module ignoring the module
@@ -281,8 +298,8 @@ status
Report whether cloud-init is running, done, disabled or errored. Exits
non-zero if an error is detected in cloud-init.
-* *\\-\\-long*: detailed status information
-* *\\-\\-wait*: block until cloud-init completes
+* ``--long``: detailed status information
+* ``--wait``: block until cloud-init completes
Below are examples of output when cloud-init is running, showing status and
the currently running modules, as well as when it is done.
@@ -306,5 +323,3 @@ the currently running modules, as well as when it is done.
time: Wed, 17 Jan 2018 20:41:59 +0000
detail:
DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst
index 33aad789..20c81eac 100644
--- a/doc/rtd/topics/code_review.rst
+++ b/doc/rtd/topics/code_review.rst
@@ -115,7 +115,7 @@ request and meeting the prerequisites laid out above.
If they need help understanding the prerequisites, or help meeting the
prerequisites, then they can (and should!) ask for help. See the
-:ref:`Asking For Help` section above for the ways to do that.
+`Asking For Help`_ section above for the ways to do that.
These are the steps that comprise the opening phase:
@@ -127,7 +127,7 @@ These are the steps that comprise the opening phase:
The **Proposer** is expected to fix CI failures. If the
**Proposer** doesn't understand the nature of the failures they
are seeing, they should comment in the PR to request assistance,
- or use another way of :ref:`Asking For Help`.
+ or use another way of `Asking For Help`_.
(Note that if assistance is not requested, the **Committers**
will assume that the **Proposer** is working on addressing the
@@ -135,7 +135,7 @@ These are the steps that comprise the opening phase:
for help!)
CI passes
- Move on to the :ref:`Review phase`.
+ Move on to the `Review Phase`_.
Review Phase
============
@@ -144,7 +144,7 @@ In this phase, the **Proposer** and the **Reviewers** will iterate
together to, hopefully, get the PR merged into the cloud-init codebase.
There are three potential outcomes: merged, rejected permanently, and
temporarily closed. (The first two are covered in this section; see
-:ref:`Inactive Pull Requests` for details about temporary closure.)
+`Inactive Pull Requests`_ for details about temporary closure.)
(In the below, when the verbs "merge" or "squash merge" are used, they
should be understood to mean "squash merged using the GitHub UI", which
@@ -177,8 +177,8 @@ These are the steps that comprise the review phase:
in one of the following:
Approve
- If the submitted PR meets all of the :ref:`Prerequisites for
- Landing Pull Requests` and passes code review, then the
+ If the submitted PR meets all of the `Prerequisites for
+ Landing Pull Requests`_ and passes code review, then the
**Committer** will squash merge immediately.
There may be circumstances where a PR should not be merged
@@ -252,5 +252,5 @@ of inactivity. It will be closed after a further 7 days of inactivity.
These closes are not considered permanent, and the closing message
should reflect this for the **Proposer**. However, if a PR is reopened,
-it should effectively enter the :ref:`Opening phase` again, as it may
+it should effectively enter the `Opening phase`_ again, as it may
need some work done to get CI passing again.
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index f73a5b2a..fc08bb7d 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -111,63 +111,3 @@ need to take care of the following items:
* **Add documentation for your datasource**: You should add a new
file in ``doc/datasources/<cloudplatform>.rst``
-
-
-API
-===
-
-The current interface that a datasource object must provide is the following:
-
-.. sourcecode:: python
-
- # returns a mime multipart message that contains
- # all the various fully-expanded components that
- # were found from processing the raw user data string
- # - when filtering only the mime messages targeting
- # this instance id will be returned (or messages with
- # no instance id)
- def get_userdata(self, apply_filter=False)
-
- # returns the raw userdata string (or none)
- def get_userdata_raw(self)
-
- # returns a integer (or none) which can be used to identify
- # this instance in a group of instances which are typically
- # created from a single command, thus allowing programmatic
- # filtering on this launch index (or other selective actions)
- @property
- def launch_index(self)
-
- # the data sources' config_obj is a cloud-config formatted
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self)
-
- # returns a list of public SSH keys
- def get_public_ssh_keys(self)
-
- # translates a device 'short' name into the actual physical device
- # fully qualified name (or none if said physical device is not attached
- # or does not exist)
- def device_name_to_device(self, name)
-
- # gets the locale string this instance should be applying
- # which typically used to adjust the instances locale settings files
- def get_locale(self)
-
- @property
- def availability_zone(self)
-
- # gets the instance id that was assigned to this instance by the
- # cloud provider or when said instance id does not exist in the backing
- # metadata this will return 'iid-datasource'
- def get_instance_id(self)
-
- # gets the fully qualified domain name that this host should be using
- # when configuring network or hostname related settings, typically
- # assigned either by the cloud provider or the user creating the vm
- def get_hostname(self, fqdn=False)
-
- def get_package_mirror_info(self)
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 1bd03970..b73d7d38 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -11,20 +11,6 @@ CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
information. Additional information is obtained via interaction with the
"endpoint".
-To find the endpoint, we now leverage the dhcp client's ability to log its
-known values on exit. The endpoint server is special DHCP option 245.
-Depending on your networking stack, this can be done
-by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in
-/etc/NetworkManager/dispatcher.d. Both of these call a sub-command
-'dhclient_hook' of cloud-init itself. This sub-command will write the client
-information in json format to /run/cloud-init/dhclient.hook/<interface>.json.
-
-If those files are not available, the fallback is to check the leases file
-for the endpoint server (again option 245).
-
-You can define the path to the lease file with the 'dhclient_lease_file'
-configuration.
-
IMDS
----
@@ -56,8 +42,6 @@ The settings that may be configured are:
dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
False.
* **data_dir**: Path used to read metadata files and write crawled data.
- * **dhclient_lease_file**: The fallback lease file to source when looking for
- custom DHCP option 245 from Azure fabric.
* **disk_aliases**: A dictionary defining which device paths should be
interpreted as ephemeral images. See cc_disk_setup module for more info.
@@ -74,7 +58,6 @@ An example configuration with the default values is provided below:
Azure:
apply_network_config: true
data_dir: /var/lib/waagent
- dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
disk_aliases:
ephemeral0: /dev/disk/cloud/azure_resource
diff --git a/doc/rtd/topics/datasources/configdrive.rst b/doc/rtd/topics/datasources/configdrive.rst
index ecc37df6..777597c2 100644
--- a/doc/rtd/topics/datasources/configdrive.rst
+++ b/doc/rtd/topics/datasources/configdrive.rst
@@ -18,39 +18,41 @@ case then the files contained on the located drive must provide equivalents to
what the EC2 metadata service would provide (which is typical of the version 2
support listed below)
-Version 1
----------
-**Note:** Version 1 is legacy and should be considered deprecated. Version 2
-has been supported in OpenStack since 2012.2 (Folsom).
+.. dropdown:: Version 1 (Deprecated)
-The following criteria are required to as a config drive:
+ **Note:** Version 1 is legacy and should be considered deprecated.
+ Version 2 has been supported in OpenStack since 2012.2 (Folsom).
-1. Must be formatted with `vfat`_ filesystem
-2. Must contain *one* of the following files
+   The following criteria are required for a config drive:
-::
+ 1. Must be formatted with `vfat`_ filesystem
+ 2. Must contain *one* of the following files
+
+ ::
+
+ /etc/network/interfaces
+ /root/.ssh/authorized_keys
+ /meta.js
- /etc/network/interfaces
- /root/.ssh/authorized_keys
- /meta.js
+ ``/etc/network/interfaces``
-``/etc/network/interfaces``
+ This file is laid down by nova in order to pass static networking
+ information to the guest. Cloud-init will copy it off of the
+ config-drive and into /etc/network/interfaces (or convert it to RH
+ format) as soon as it can, and then attempt to bring up all network
+ interfaces.
- This file is laid down by nova in order to pass static networking
- information to the guest. Cloud-init will copy it off of the config-drive
- and into /etc/network/interfaces (or convert it to RH format) as soon as
- it can, and then attempt to bring up all network interfaces.
+ ``/root/.ssh/authorized_keys``
-``/root/.ssh/authorized_keys``
+      This file is laid down by nova, and contains the ssh keys that were
+      provided to nova on instance creation (nova-boot --key ....)
- This file is laid down by nova, and contains the ssk keys that were
- provided to nova on instance creation (nova-boot --key ....)
+ ``/meta.js``
-``/meta.js``
+ meta.js is populated on the config-drive in response to the user
+ passing "meta flags" (nova boot --meta key=value ...). It is
+ expected to be json formatted.
- meta.js is populated on the config-drive in response to the user passing
- "meta flags" (nova boot --meta key=value ...). It is expected to be json
- formatted.
Version 2
---------
@@ -58,7 +60,7 @@ Version 2
The following criteria are required for a config drive:
1. Must be formatted with `vfat`_ or `iso9660`_ filesystem
- or have a *filesystem* label of **config-2**
+ or have a *filesystem* label of **config-2** or **CONFIG-2**
2. The files that will typically be present in the config drive are:
::
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 94e4158d..77232269 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -38,11 +38,26 @@ Userdata is accessible via the following URL:
GET http://169.254.169.254/2009-04-04/user-data
1234,fred,reboot,true | 4512,jimbo, | 173,,,
-Note that there are multiple versions of this data provided, cloud-init
-by default uses **2009-04-04** but newer versions can be supported with
-relative ease (newer versions have more data exposed, while maintaining
-backward compatibility with the previous versions).
-Version **2016-09-02** is required for secondary IP address support.
+Note that there are multiple EC2 Metadata versions of this data provided
+to instances. cloud-init will attempt to use the most recent API version it
+supports in order to get the latest API features and instance-data. If a given
+API version is not exposed to the instance, those API features will be
+unavailable to the instance.
+
+
++----------------+----------------------------------------------------------+
+| EC2 version    | supported instance-data/feature                          |
++================+==========================================================+
+| **2021-03-23** | Required for Instance tag support. This feature must be  |
+|                | enabled individually on each instance. See the           |
+|                | `EC2 tags user guide`_.                                  |
++----------------+----------------------------------------------------------+
+| **2016-09-02** | Required for secondary IP address support.               |
++----------------+----------------------------------------------------------+
+| **2009-04-04** | Minimum supported EC2 API version for meta-data and      |
+|                | user-data.                                               |
++----------------+----------------------------------------------------------+
+
To see which versions are supported from your cloud provider use the following
URL:
@@ -71,7 +86,7 @@ configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
The settings that may be configured are:
- * **metadata_urls**: This list of urls will be searched for an Ec2
+ * **metadata_urls**: This list of urls will be searched for an EC2
metadata service. The first entry that successfully returns a 200 response
for <url>/<version>/meta-data/instance-id will be selected.
(default: ['http://169.254.169.254', 'http://instance-data:8773']).
@@ -121,4 +136,5 @@ Notes
For example: the primary NIC will have a DHCP route-metric of 100,
the next NIC will be 200.
+.. _EC2 tags user guide: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS
.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/lxd.rst b/doc/rtd/topics/datasources/lxd.rst
index fa2dcf5d..99b42cfa 100644
--- a/doc/rtd/topics/datasources/lxd.rst
+++ b/doc/rtd/topics/datasources/lxd.rst
@@ -20,18 +20,22 @@ The LXD socket device ``/dev/lxd/sock`` is only present on containers and VMs
when the instance configuration has ``security.devlxd=true`` (default).
Disabling ``security.devlxd`` configuration setting at initial launch will
ensure that cloud-init uses the :ref:`datasource_nocloud` datasource.
-Disabling ``security.devlxd`` ove the life of the container will result in
+Disabling ``security.devlxd`` over the life of the container will result in
warnings from cloud-init and cloud-init will keep the originally detected LXD
datasource.
-The LXD datasource provides cloud-init the opportunity to react to meta-data,
+The LXD datasource is detected as viable by ``ds-identify`` during systemd
+generator time when either ``/dev/lxd/sock`` exists or
+``/sys/class/dmi/id/board_name`` matches "LXD".
+
+The LXD datasource provides cloud-init the ability to react to meta-data,
vendor-data, user-data and network-config changes and render the updated
configuration across a system reboot.
-One can manipulate what meta-data, vendor-data or user-data is provided to
-the launched container using the LXD profiles or
-``lxc launch ... -c <key>="<value>"`` at initial container launch using one of
-the following keys:
+To modify what meta-data, vendor-data or user-data are provided to the
+launched container, use either LXD profiles or
+``lxc launch ... -c <key>="<value>"`` at initial container launch setting one
+of the following keys:
- user.meta-data: YAML metadata which will be appended to base meta-data
- user.vendor-data: YAML which overrides any meta-data values
@@ -44,6 +48,14 @@ the following keys:
used by both `#template: jinja` #cloud-config templates and
the `cloud-init query` command.
+Note: LXD version 4.22 introduced a new scope of config keys prefixed by
+``cloud-init.`` which are preferred over the related ``user.*`` keys:
+
+ - cloud-init.meta-data
+ - cloud-init.vendor-data
+ - cloud-init.network-config
+ - cloud-init.user-data
+
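+For example, user data can be provided at launch time with the newer key (the
+image alias and file name here are illustrative):
+
+.. code-block:: shell-session
+
+   $ lxc launch ubuntu:jammy my-vm -c cloud-init.user-data="$(cat user-data.yaml)"
+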
By default, network configuration from this datasource will be:
diff --git a/doc/rtd/topics/datasources/maas.rst b/doc/rtd/topics/datasources/maas.rst
index 427fba24..eb59dab9 100644
--- a/doc/rtd/topics/datasources/maas.rst
+++ b/doc/rtd/topics/datasources/maas.rst
@@ -3,8 +3,6 @@
MAAS
====
-*TODO*
+.. TODO: add content
For now see: https://maas.io/docs
-
-
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index d31f5d0f..8ce656af 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -61,35 +61,42 @@ You may also optionally provide a vendor-data file in the following format.
/vendor-data
-Given a disk ubuntu 12.04 cloud image in 'disk.img', you can create a
+Given an Ubuntu cloud disk image in 'disk.img', you can create a
sufficient disk by following the example below.
::
- ## create user-data and meta-data files that will be used
+ ## 1) create user-data and meta-data files that will be used
## to modify image on first boot
- $ { echo instance-id: iid-local01; echo local-hostname: cloudimg; } > meta-data
+ $ echo "instance-id: iid-local01\nlocal-hostname: cloudimg" > meta-data
+ $ echo "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
- $ printf "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
-
- ## create a disk to attach with some user-data and meta-data
+ ## 2a) create a disk to attach with some user-data and meta-data
$ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data
- ## alternatively, create a vfat filesystem with same files
- ## $ truncate --size 2M seed.img
- ## $ mkfs.vfat -n cidata seed.img
- ## $ mcopy -oi seed.img user-data meta-data ::
+ ## 2b) alternatively, create a vfat filesystem with same files
+ ## $ truncate --size 2M seed.iso
+ ## $ mkfs.vfat -n cidata seed.iso
+
+ ## 2b) option 1: mount and copy files
+ ## $ sudo mount -t vfat seed.iso /mnt
+ ## $ sudo cp user-data meta-data /mnt
+ ## $ sudo umount /mnt
+
+ ## 2b) option 2: the mtools package provides mcopy, which can access vfat
+ ## filesystems without mounting them
+ ## $ mcopy -oi seed.iso user-data meta-data
- ## create a new qcow image to boot, backed by your original image
- $ qemu-img create -f qcow2 -b disk.img boot-disk.img
+ ## 3) create a new qcow image to boot, backed by your original image
+ $ qemu-img create -f qcow2 -b disk.img -F qcow2 boot-disk.img
- ## boot the image and login as 'ubuntu' with password 'passw0rd'
+ ## 4) boot the image and login as 'ubuntu' with password 'passw0rd'
## note, passw0rd was set as password through the user-data above,
## there is no password set on these images.
$ kvm -m 256 \
-net nic -net user,hostfwd=tcp::2222-:22 \
-drive file=boot-disk.img,if=virtio \
- -drive file=seed.iso,if=virtio
+ -drive driver=raw,file=seed.iso,if=virtio
**Note:** the instance-id provided (``iid-local01`` above) is what is used
to determine if this is "first boot". So if you are making updates to
diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst
index c4b3f2d0..cbbf6900 100644
--- a/doc/rtd/topics/datasources/rbxcloud.rst
+++ b/doc/rtd/topics/datasources/rbxcloud.rst
@@ -12,14 +12,12 @@ user accounts and user metadata.
Metadata drive
--------------
-Drive metadata is a `FAT`_-formatted partition with the ```CLOUDMD``` label on
-the system disk. Its contents are refreshed each time the virtual machine
-is restarted, if the partition exists. For more information see
-`HyperOne Virtual Machine docs`_.
+Drive metadata is a `FAT`_-formatted partition with the ``CLOUDMD`` or
+``cloudmd`` label on the system disk. Its contents are refreshed each time
+the virtual machine is restarted, if the partition exists. For more information
+see `HyperOne Virtual Machine docs`_.
.. _HyperOne: http://www.hyperone.com/
.. _Rootbox: https://rootbox.com/
.. _HyperOne Virtual Machine docs: http://www.hyperone.com/
.. _FAT: https://en.wikipedia.org/wiki/File_Allocation_Table
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index a4a2779f..23ef0dfe 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -33,7 +33,6 @@ subcommands default to reading /var/log/cloud-init.log.
The time the event takes is printed after the "+" character.
Starting stage: modules-config
- |`->config-emit_upstart ran successfully @05.47600s +00.00100s
|`->config-snap_config ran successfully @05.47700s +00.00100s
|`->config-ssh-import-id ran successfully @05.47800s +00.00200s
|`->config-locale ran successfully @05.48000s +00.00100s
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst
index 9d2c9896..d4606ac5 100644
--- a/doc/rtd/topics/dir_layout.rst
+++ b/doc/rtd/topics/dir_layout.rst
@@ -38,7 +38,7 @@ application::
It is typically located at ``/var/lib`` but there are certain configuration
scenarios where this can be altered.
- TBD, describe this overriding more.
+.. TODO: expand this section
``data/``
@@ -74,7 +74,7 @@ application::
``seed/``
- TBD
+ Contains seeded data files: meta-data, network-config, user-data, vendor-data
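+
+  For example, a NoCloud seed might appear as (illustrative layout)::
+
+      seed/nocloud-net/
+          meta-data
+          user-data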
``sem/``
@@ -83,5 +83,3 @@ application::
is only run `per-once`, `per-instance`, `per-always`. This folder contains
semaphore `files` which are only supposed to run `per-once` (not tied to the
instance id).
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/docs.rst b/doc/rtd/topics/docs.rst
index 1b15377e..99c2c140 100644
--- a/doc/rtd/topics/docs.rst
+++ b/doc/rtd/topics/docs.rst
@@ -58,14 +58,6 @@ other pages. For example for the FAQ page this would be:
.. _faq:
-Footer
-------
-The footer should include the textwidth
-
-.. code-block:: rst
-
- .. vi: textwidth=79
-
Vertical Whitespace
-------------------
One newline between each section helps ensure readability of the documentation
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index 8c7071e5..8ec8d8ab 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -34,18 +34,6 @@ Configure an instances trusted CA certificates
:language: yaml
:linenos:
-Configure an instances resolv.conf
-==================================
-
-*Note:* when using a config drive and a RHEL like system resolv.conf
-will also be managed 'automatically' due to the available information
-provided for dns servers in the config drive network format. For those
-that wish to have different settings use this module.
-
-.. literalinclude:: ../../examples/cloud-config-resolv-conf.txt
- :language: yaml
- :linenos:
-
Install and run `chef`_ recipes
===============================
@@ -53,13 +41,6 @@ Install and run `chef`_ recipes
:language: yaml
:linenos:
-Setup and run `puppet`_
-=======================
-
-.. literalinclude:: ../../examples/cloud-config-puppet.txt
- :language: yaml
- :linenos:
-
Add primary apt repositories
============================
@@ -78,14 +59,6 @@ Run commands on first boot
:language: yaml
:linenos:
-
-Alter the completion message
-============================
-
-.. literalinclude:: ../../examples/cloud-config-final-message.txt
- :language: yaml
- :linenos:
-
Install arbitrary packages
==========================
@@ -114,20 +87,6 @@ Adjust mount points mounted
:language: yaml
:linenos:
-Call a url when finished
-========================
-
-.. literalinclude:: ../../examples/cloud-config-phone-home.txt
- :language: yaml
- :linenos:
-
-Reboot/poweroff when finished
-=============================
-
-.. literalinclude:: ../../examples/cloud-config-power-state.txt
- :language: yaml
- :linenos:
-
Configure instances SSH keys
============================
@@ -149,13 +108,6 @@ Disk setup
:language: yaml
:linenos:
-Register Red Hat Subscription
-=============================
-
-.. literalinclude:: ../../examples/cloud-config-rh_subscription.txt
- :language: yaml
- :linenos:
-
Configure data sources
======================
@@ -170,13 +122,6 @@ Create partitions and filesystems
:language: yaml
:linenos:
-Grow partitions
-===============
-
-.. literalinclude:: ../../examples/cloud-config-growpart.txt
- :language: yaml
- :linenos:
-
.. _chef: http://www.chef.io/chef/
.. _puppet: http://puppetlabs.com/
.. vi: textwidth=79
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 125ce9f4..0f77fb15 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -60,7 +60,7 @@ to the most recently used instance-id directory. This folder contains the
information cloud-init received from datasources, including vendor and user
data. This can be helpful to review to ensure the correct data was passed.
-It also contains the `datasource` file that containers the full information
+It also contains the `datasource` file that contains the full information
about what datasource was identified and used to setup the system.
Finally, the `boot-finished` file is the last thing that cloud-init does.
@@ -146,7 +146,7 @@ provided to the system:
.. code-block:: shell-session
- $ cloud-init devel schema --system --annotate
+ $ cloud-init schema --system --annotate
As launching instances in the cloud can cost money and take a bit longer,
sometimes it is easier to launch instances locally using Multipass or LXD:
@@ -278,7 +278,7 @@ configuration, including network configuration and metadata:
See the :ref:`network_config_v2` page for details on the format and config of
network configuration. To learn more about the possible values for metadata,
-check out the :ref:`nocloud` page.
+check out the :ref:`datasource_nocloud` page.
.. _cloud-utils: https://github.com/canonical/cloud-utils/
@@ -313,5 +313,7 @@ variety of sources.
.. _Metadata and cloud-init: https://www.youtube.com/watch?v=RHVhIWifVqU
.. _The beauty of cloud-init: http://brandon.fuller.name/archives/2011/05/02/06.40.57/
.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY
+.. Blog Post: [terraform, azure, devops, docker, dotnet, cloud-init] https://codingsoul.org/2022/04/25/build-azure-devops-agents-with-linux-cloud-init-for-dotnet-development/
+.. Youtube: [proxmox, cloud-init, template] https://www.youtube.com/watch?v=shiIi38cJe4
.. vi: textwidth=79
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index 93ef34f0..a4b772a2 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -7,6 +7,67 @@ User-Data Formats
User data that will be acted upon by cloud-init must be in one of the following
types.
+Cloud Config Data
+=================
+
+Cloud-config is the simplest way to accomplish some things via user-data. Using
+cloud-config syntax, the user can specify certain things in a human friendly
+format.
+
+These things include:
+
+- apt upgrade should be run on first boot
+- a different apt mirror should be used
+- additional apt sources should be added
+- certain SSH keys should be imported
+- *and many more...*
+
+.. note::
+ This file must be valid YAML syntax.
+
+See the :ref:`yaml_examples` section for a commented set of examples of
+supported cloud config formats.
+
+Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when
+using a MIME archive.
+
+.. note::
+ New in cloud-init v. 18.4: Cloud config data can also render cloud instance
+ metadata variables using jinja templating. See
+ :ref:`instance_metadata` for more information.
+
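+A minimal, illustrative example (the keys shown are placeholders for any
+supported cloud-config keys):
+
+.. code-block:: yaml
+
+   #cloud-config
+   package_update: true
+   packages:
+     - htop
+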
+User-Data Script
+================
+
+Typically used by those who just want to execute a shell script.
+
+Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME
+archive.
+
+.. note::
+ New in cloud-init v. 18.4: User-data scripts can also render cloud instance
+ metadata variables using jinja templating. See
+ :ref:`instance_metadata` for more information.
+
+Example
+-------
+
+.. code-block:: shell-session
+
+ $ cat myscript.sh
+
+ #!/bin/sh
+ echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt
+
+ $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9
+
+Kernel Command Line
+===================
+
+When using the :ref:`datasource_nocloud` datasource, users can pass user data
+via the kernel command line parameters. See the :ref:`datasource_nocloud`
+datasource documentation for more details.
+
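+For example, the NoCloud datasource can be pointed at a seed containing user
+data via a kernel argument like the following (the URL is illustrative):
+
+::
+
+   ds=nocloud-net;s=http://10.0.2.2:8000/
+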
Gzip Compressed Content
=======================
@@ -34,7 +95,6 @@ Supported content-types are listed from the cloud-init subcommand make-mime:
cloud-config-jsonp
jinja2
part-handler
- upstart-job
x-include-once-url
x-include-url
x-shellscript
@@ -42,7 +102,6 @@ Supported content-types are listed from the cloud-init subcommand make-mime:
x-shellscript-per-instance
x-shellscript-per-once
-
Helper subcommand to generate mime messages
-------------------------------------------
@@ -73,32 +132,6 @@ Create userdata containing 3 shell scripts:
.. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
-
-User-Data Script
-================
-
-Typically used by those who just want to execute a shell script.
-
-Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME
-archive.
-
-.. note::
- New in cloud-init v. 18.4: User-data scripts can also render cloud instance
- metadata variables using jinja templating. See
- :ref:`instance_metadata` for more information.
-
-Example
--------
-
-.. code-block:: shell-session
-
- $ cat myscript.sh
-
- #!/bin/sh
- echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt
-
- $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9
-
Include File
============
@@ -112,44 +145,6 @@ an error occurs reading a file the remaining files will not be read.
Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using
a MIME archive.
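
A minimal, illustrative include file (the URLs are placeholders):

::

    #include
    https://example.com/cloud-config.yaml
    https://example.com/setup-script.sh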
-Cloud Config Data
-=================
-
-Cloud-config is the simplest way to accomplish some things via user-data. Using
-cloud-config syntax, the user can specify certain things in a human friendly
-format.
-
-These things include:
-
-- apt upgrade should be run on first boot
-- a different apt mirror should be used
-- additional apt sources should be added
-- certain SSH keys should be imported
-- *and many more...*
-
-.. note::
- This file must be valid YAML syntax.
-
-See the :ref:`yaml_examples` section for a commented set of examples of
-supported cloud config formats.
-
-Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when
-using a MIME archive.
-
-.. note::
- New in cloud-init v. 18.4: Cloud config data can also render cloud instance
- metadata variables using jinja templating. See
- :ref:`instance_metadata` for more information.
-
-Upstart Job
-===========
-
-Content is placed into a file in ``/etc/init``, and will be consumed by upstart
-as any other upstart job.
-
-Begins with: ``#upstart-job`` or ``Content-Type: text/upstart-job`` when using
-a MIME archive.
-
Cloud Boothook
==============
@@ -207,13 +202,6 @@ Example
Also this `blog`_ post offers another example for more advanced usage.
-Kernel Command Line
-===================
-
-When using the :ref:`datasource_nocloud` datasource, users can pass user data
-via the kernel command line parameters. See the :ref:`datasource_nocloud`
-datasource documentation for more details.
-
Disabling User-Data
===================
@@ -224,5 +212,3 @@ cloud-init from processing user-data.
.. [#] See your cloud provider for applicable user-data size limitations...
.. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index f08ead69..0c44d04e 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -263,7 +263,6 @@ EC2 instance:
"[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=(\"/dev/log\", handlers.SysLogHandler.LOG_USER)\n"
],
"cloud_config_modules": [
- "emit_upstart",
"snap",
"ssh-import-id",
"locale",
diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst
index f9f719da..2bee6a5c 100644
--- a/doc/rtd/topics/integration_tests.rst
+++ b/doc/rtd/topics/integration_tests.rst
@@ -22,7 +22,7 @@ marks can be used to limit tests to particular platforms. The
client fixture can be used to interact with the launched
test instance.
-See :ref:`Examples` section for examples.
+See `Examples`_ section for examples.
Test Execution
==============
@@ -54,8 +54,8 @@ or by providing an environment variable of the same name prepended with
CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
-Cloud Interation
-================
+Cloud Interaction
+=================
Cloud interaction happens via the
`pycloudlib <https://pycloudlib.readthedocs.io/en/latest/index.html>`_ library.
In order to run integration tests, pycloudlib must first be
@@ -74,7 +74,7 @@ Image Selection
Each integration testing run uses a single image as its basis. This
image is configured using the ``OS_IMAGE`` variable; see
-:ref:`Configuration` for details of how configuration works.
+`Configuration`_ for details of how configuration works.
``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g.
"focal"), or an image specification. If an Ubuntu series name is
@@ -139,13 +139,13 @@ Integration tests rely heavily on fixtures to do initial test setup.
One or more of these fixtures will be used in almost every integration test.
Details such as the cloud platform or initial image to use are determined
-via what is specified in the :ref:`Configuration`.
+via what is specified in the `Configuration`_.
client
------
The ``client`` fixture should be used for most test cases. It ensures:
-- All setup performed by :ref:`session_cloud` and :ref:`setup_image`
+- All setup performed by `session_cloud`_ and `setup_image`_
- `Pytest marks <https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/conftest.py#L220-L224>`_
used during instance creation are obtained and applied
- The test instance is launched
diff --git a/doc/rtd/topics/logging.rst b/doc/rtd/topics/logging.rst
index a14fb685..744e9bd4 100644
--- a/doc/rtd/topics/logging.rst
+++ b/doc/rtd/topics/logging.rst
@@ -168,7 +168,8 @@ With defaults used::
log_serv: "10.0.4.1"
-For more information on rsyslog configuration, see :ref:`cc_rsyslog`.
+For more information on rsyslog configuration, see
+:ref:`topics/modules:rsyslog`.
.. _python logging config: https://docs.python.org/3/library/logging.config.html#configuration-file-format
.. _python logging handlers: https://docs.python.org/3/library/logging.handlers.html
diff --git a/doc/rtd/topics/module_creation.rst b/doc/rtd/topics/module_creation.rst
new file mode 100644
index 00000000..b09cd2cc
--- /dev/null
+++ b/doc/rtd/topics/module_creation.rst
@@ -0,0 +1,118 @@
+.. _module_creation:
+
+Module Creation
+***************
+
+Much of cloud-init's functionality is provided by :ref:`modules<modules>`.
+All modules follow a similar layout in order to provide consistent execution
+and documentation. Use the example provided here to create a new module.
+
+Example
+=======
+.. code-block:: python
+
+ # This file is part of cloud-init. See LICENSE file for license information.
+ """Example Module: Shows how to create a module"""
+
+ from logging import Logger
+
+ from cloudinit.cloud import Cloud
+ from cloudinit.config.schema import MetaSchema, get_meta_doc
+ from cloudinit.distros import ALL_DISTROS
+ from cloudinit.settings import PER_INSTANCE
+
+ MODULE_DESCRIPTION = """\
+ Description that will be used in module documentation.
+
+ This will likely take multiple lines.
+ """
+
+ meta: MetaSchema = {
+ "id": "cc_example",
+ "name": "Example Module",
+ "title": "Shows how to create a module",
+ "description": MODULE_DESCRIPTION,
+ "distros": [ALL_DISTROS],
+ "frequency": PER_INSTANCE,
+ "examples": [
+ "example_key: example_value",
+ "example_other_key: ['value', 2]",
+ ],
+ }
+
+ __doc__ = get_meta_doc(meta)
+
+
+ def handle(name: str, cfg: dict, cloud: Cloud, log: Logger, args: list):
+ log.debug(f"Hi from module {name}")
+
+
+Guidelines
+==========
+
+* Create a new module in the ``cloudinit/config`` directory with a `cc_`
+ prefix.
+* Your module must include a ``handle`` function. The arguments are:
+
+ * ``name``: The module name specified in the configuration
+ * ``cfg``: A configuration object that is the result of the merging of
+ cloud-config configuration with any datasource provided configuration.
+  * ``cloud``: A cloud object that can be used to access the datasource,
+    the paths for the given distro, and the data provided by the
+    datasource instance.
+ * ``log``: A logger object that can be used to log messages.
+ * ``args``: An argument list. This is usually empty and is only populated
+ if the module is called independently from the command line.
+
+* If your module introduces any new cloud-config keys, you must provide a
+  schema definition in `cloud-init-schema.json`_ (an illustrative sketch
+  follows this list).
+* The ``meta`` variable must exist and be of type `MetaSchema`_.
+
+ * ``id``: The module id. In most cases this will be the filename without
+ the `.py` extension.
+ * ``distros``: Defines the list of supported distros. It can contain
+ any of the values (not keys) defined in the `OSFAMILIES`_ map or
+ ``[ALL_DISTROS]`` if there is no distro restriction.
+  * ``frequency``: Defines how often the module runs. It must be one of:
+
+ * ``PER_ALWAYS``: Runs on every boot.
+ * ``ONCE``: Runs only on first boot.
+ * ``PER_INSTANCE``: Runs once per instance. When exactly this happens
+    is dependent on the datasource but may be triggered anytime there
+ would be a significant change to the instance metadata. An example
+ could be an instance being moved to a different subnet.
+
+ * ``examples``: Lists examples of any cloud-config keys this module reacts
+ to. These examples will be rendered in the module reference documentation
+ and will automatically be tested against the defined schema
+ during testing.
+
+* ``__doc__ = get_meta_doc(meta)`` is necessary to provide proper module
+ documentation.
+
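+A minimal, illustrative sketch of a schema entry for the hypothetical
+``cc_example`` module above (the exact layout of the schema file may differ;
+see `cloud-init-schema.json`_ for real entries):
+
+.. code-block:: json
+
+   {
+     "$defs": {
+       "cc_example": {
+         "type": "object",
+         "properties": {
+           "example_key": {"type": "string"}
+         }
+       }
+     }
+   }
+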
+Module Execution
+================
+
+In order for a module to be run, it must be defined in a module run section in
+``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d`` on the launched
+instance. The three module sections are
+`cloud_init_modules`_, `cloud_config_modules`_, and `cloud_final_modules`_,
+corresponding to the :ref:`topics/boot:Network`, :ref:`topics/boot:Config`,
+and :ref:`topics/boot:Final` boot stages respectively.
+
+Add your module to `cloud.cfg.tmpl`_ under the appropriate module section.
+Each module gets run in the order listed, so ensure your module is defined
+in the correct location based on dependencies. If your module has no particular
+dependencies or is not necessary for a later boot stage, it should be placed
+in the ``cloud_final_modules`` section before the ``final-message`` module.
+
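+For example, enabling the hypothetical ``cc_example`` module from the sketch
+above would add it to the ``cloud_final_modules`` list:
+
+.. code-block:: yaml
+
+   cloud_final_modules:
+     - example
+     - final-message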
+
+
+.. _MetaSchema: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/config/schema.py#L58
+.. _OSFAMILIES: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/distros/__init__.py#L35
+.. _settings.py: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/settings.py#L66
+.. _cloud-init-schema.json: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/cloud-init-schema.json
+.. _cloud.cfg.tmpl: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl
+.. _cloud_init_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L70
+.. _cloud_config_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L101
+.. _cloud_final_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L144
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index 093cee61..4bfb27cf 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -1,9 +1,8 @@
.. _modules:
-*******
-Modules
-*******
+Module Reference
+****************
.. contents:: Table of Contents
.. automodule:: cloudinit.config.cc_apk_configure
@@ -16,10 +15,8 @@ Modules
.. automodule:: cloudinit.config.cc_debug
.. automodule:: cloudinit.config.cc_disable_ec2_metadata
.. automodule:: cloudinit.config.cc_disk_setup
-.. automodule:: cloudinit.config.cc_emit_upstart
.. automodule:: cloudinit.config.cc_fan
.. automodule:: cloudinit.config.cc_final_message
-.. automodule:: cloudinit.config.cc_foo
.. automodule:: cloudinit.config.cc_growpart
.. automodule:: cloudinit.config.cc_grub_dpkg
.. automodule:: cloudinit.config.cc_install_hotplug
@@ -64,4 +61,5 @@ Modules
.. automodule:: cloudinit.config.cc_users_groups
.. automodule:: cloudinit.config.cc_write_files
.. automodule:: cloudinit.config.cc_yum_add_repo
+.. automodule:: cloudinit.config.cc_zypper_add_repo
.. vi: textwidth=79
diff --git a/doc/rtd/topics/security.rst b/doc/rtd/topics/security.rst
index 48fcb0a5..ad934ded 100644
--- a/doc/rtd/topics/security.rst
+++ b/doc/rtd/topics/security.rst
@@ -1,5 +1,6 @@
+********
+Security
+********
.. _security:
.. mdinclude:: ../../../SECURITY.md
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst
index 5543c6f5..4bcbba5b 100644
--- a/doc/rtd/topics/testing.rst
+++ b/doc/rtd/topics/testing.rst
@@ -1,3 +1,5 @@
+.. _testing:
+
*******
Testing
*******
diff --git a/doc/rtd/topics/tutorial.rst b/doc/rtd/topics/tutorial.rst
new file mode 100644
index 00000000..e8bed272
--- /dev/null
+++ b/doc/rtd/topics/tutorial.rst
@@ -0,0 +1,141 @@
+.. _lxd_tutorial:
+
+Tutorial
+********
+
+In this tutorial, we will create our first cloud-init user data configuration
+and deploy it into an LXD container. We'll be using LXD_ for this tutorial
+because it provides first class support for cloud-init user data as well as
+systemd support. Because it is container based, it allows for quick
+testing and iterating on our user data definition.
+
+Setup LXD
+=========
+
+Skip this section if you already have LXD_ set up.
+
+Install LXD
+-----------
+
+.. code-block:: shell-session
+
+ $ sudo snap install lxd
+
+If you don't have snap, you can install LXD using one of the
+`other installation options`_.
+
+Initialize LXD
+--------------
+
+.. code-block:: shell-session
+
+ $ lxd init --minimal
+
+The minimal configuration should work fine for our purposes. It can always
+be changed at a later time if needed.
+
+Define our user data
+====================
+
+Now that LXD is set up, we can define our user data. Create the
+following file on your local filesystem at ``/tmp/my-user-data``:
+
+.. code-block:: yaml
+
+ #cloud-config
+ runcmd:
+ - echo 'Hello, World!' > /var/tmp/hello-world.txt
+
+Here we are defining our cloud-init user data in the
+:ref:`cloud-config<topics/format:Cloud Config Data>` format, using the
+`runcmd`_ module to define a command to run. When applied, it
+should write ``Hello, World!`` to ``/var/tmp/hello-world.txt``.
+
+Launch a container with our user data
+=====================================
+
+Now that we have LXD set up and our user data defined, we can launch an
+instance with our user data:
+
+.. code-block:: shell-session
+
+ $ lxc launch ubuntu:focal my-test --config=user.user-data="$(cat /tmp/my-user-data)"
+
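+While the instance boots, we can double-check what the container received:
+``lxc config get`` should print back the user data we just passed in:
+
+.. code-block:: shell-session
+
+    $ lxc config get my-test user.user-data
+    #cloud-config
+    runcmd:
+      - echo 'Hello, World!' > /var/tmp/hello-world.txt
+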
+Verify that cloud-init ran successfully
+=======================================
+
+After launching the container, we should be able to connect
+to our instance using:
+
+.. code-block:: shell-session
+
+ $ lxc shell my-test
+
+You should now be in a shell inside the LXD instance.
+Before validating the user data, let's wait for cloud-init to complete
+successfully:
+
+.. code-block:: shell-session
+
+ $ cloud-init status --wait
+ .....
+ cloud-init status: done
+ $
+
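+If you're curious, ``cloud-init status --long`` should add detail such as
+the time cloud-init finished and, on failure, error details (output
+abbreviated here):
+
+.. code-block:: shell-session
+
+    $ cloud-init status --long
+    status: done
+    time: ...
+    detail:
+    ...
+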
+We can now verify that cloud-init received the expected user data:
+
+.. code-block:: shell-session
+
+ $ cloud-init query userdata
+ #cloud-config
+ runcmd:
+ - echo 'Hello, World!' > /var/tmp/hello-world.txt
+
+We can also assert that the user data we provided is valid cloud-config:
+
+.. code-block:: shell-session
+
+ $ cloud-init schema --system --annotate
+ Valid cloud-config: system userdata
+ $
+
+Finally, verify that our user data was applied successfully:
+
+.. code-block:: shell-session
+
+ $ cat /var/tmp/hello-world.txt
+ Hello, World!
+ $
+
+We can see that cloud-init has consumed our user data successfully!
+
+Tear down
+=========
+
+Exit the container shell (e.g., by typing ``exit`` or pressing ctrl-d).
+Once we have exited the container, we can stop it using:
+
+.. code-block:: shell-session
+
+ $ lxc stop my-test
+
+and we can remove the container using:
+
+.. code-block:: shell-session
+
+ $ lxc rm my-test
+
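+Alternatively, ``lxc delete`` with ``--force`` should stop and remove the
+container in a single step:
+
+.. code-block:: shell-session
+
+    $ lxc delete my-test --force
+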
+What's next?
+============
+
+In this tutorial, we used the runcmd_ module to execute a shell command.
+The full list of available modules can be found in the
+:ref:`modules documentation<modules>`.
+Each module's documentation includes examples of how to use it.
+
+You can also head over to the :ref:`examples<yaml_examples>` page for
+more examples of common use cases.
+
+.. _LXD: https://linuxcontainers.org/lxd/
+.. _other installation options: https://linuxcontainers.org/lxd/getting-started-cli/#other-installation-options
+.. _runcmd: https://cloudinit.readthedocs.io/en/latest/topics/modules.html#runcmd
diff --git a/doc/status.txt b/doc/status.txt
index 60993216..374997b8 100644
--- a/doc/status.txt
+++ b/doc/status.txt
@@ -46,8 +46,8 @@ Thus, to determine if cloud-init is finished:
if os.path.exists(fin):
ret = json.load(open(fin, "r"))
if len(ret['v1']['errors']):
- print "Finished with errors:" + "\n".join(ret['v1']['errors'])
+ print("Finished with errors:" + "\n".join(ret['v1']['errors']))
else:
- print "Finished no errors"
+ print("Finished no errors")
else:
- print "Not Finished"
+ print("Not Finished")
diff --git a/doc/userdata.txt b/doc/userdata.txt
index 355966a8..e44a4952 100644
--- a/doc/userdata.txt
+++ b/doc/userdata.txt
@@ -52,12 +52,6 @@ finds. However, certain types of user-data are handled specially.
This content is "cloud-config" data. See the examples for a
commented example of supported config formats.
- * Upstart Job
- begins with #upstart-job or Content-Type: text/upstart-job
-
- Content is placed into a file in /etc/init, and will be consumed
- by upstart as any other upstart job.
-
* Cloud Boothook
begins with #cloud-boothook or Content-Type: text/cloud-boothook
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 8329eeec..102553cb 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@44206bb95c49901d994c9eb772eba07f2a1b6661
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@675dffdc14224a03f8f0ba7212ecb3ca2a8a7083
pytest
diff --git a/packages/debian/control.in b/packages/debian/control.in
index 72895b47..5bb915a9 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -14,6 +14,7 @@ Depends: ${misc:Depends},
iproute2,
isc-dhcp-client
Recommends: eatmydata, sudo, software-properties-common, gdisk
+Suggests: ssh-import-id, openssh-server
Description: Init scripts for cloud instances
Cloud instances need special scripts to run during initialisation
to retrieve and install ssh keys and to let the user run various scripts.
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index eaf13469..36e6b38f 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -52,7 +52,8 @@
"procps",
"rsyslog",
"shadow-utils",
- "sudo"
+ "sudo",
+ "hostname"
]
},
"suse" : {
diff --git a/pyproject.toml b/pyproject.toml
index 324d6f35..1aac03a8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,7 +42,6 @@ exclude=[
'^cloudinit/sources/DataSourceVMware\.py$',
'^cloudinit/sources/__init__\.py$',
'^cloudinit/sources/helpers/vmware/imc/config_file\.py$',
- '^cloudinit/stages\.py$',
'^cloudinit/templater\.py$',
'^cloudinit/url_helper\.py$',
'^conftest\.py$',
@@ -56,12 +55,9 @@ exclude=[
'^tests/integration_tests/modules/test_growpart\.py$',
'^tests/integration_tests/modules/test_ssh_keysfile\.py$',
'^tests/unittests/__init__\.py$',
- '^tests/unittests/cmd/devel/test_render\.py$',
'^tests/unittests/cmd/test_clean\.py$',
'^tests/unittests/cmd/test_cloud_id\.py$',
'^tests/unittests/cmd/test_main\.py$',
- '^tests/unittests/cmd/test_query\.py$',
- '^tests/unittests/cmd/test_status\.py$',
'^tests/unittests/config/test_cc_chef\.py$',
'^tests/unittests/config/test_cc_landscape\.py$',
'^tests/unittests/config/test_cc_locale\.py$',
@@ -77,7 +73,6 @@ exclude=[
'^tests/unittests/net/test_dhcp\.py$',
'^tests/unittests/net/test_init\.py$',
'^tests/unittests/sources/test_aliyun\.py$',
- '^tests/unittests/sources/test_azure\.py$',
'^tests/unittests/sources/test_ec2\.py$',
'^tests/unittests/sources/test_exoscale\.py$',
'^tests/unittests/sources/test_gce\.py$',
@@ -97,6 +92,9 @@ exclude=[
'^tests/unittests/test_subp\.py$',
'^tests/unittests/test_templating\.py$',
'^tests/unittests/test_url_helper\.py$',
- '^tests/unittests/test_util\.py$',
'^tools/mock-meta\.py$',
]
+
+[[tool.mypy.overrides]]
+module = [ "httpretty", "pycloudlib.*" ]
+ignore_missing_imports = true
diff --git a/setup.py b/setup.py
index a9132d2c..7ba0ee8c 100755..100644
--- a/setup.py
+++ b/setup.py
@@ -21,10 +21,12 @@ import setuptools
from setuptools.command.egg_info import egg_info
from setuptools.command.install import install
+# pylint: disable=W0402
try:
from setuptools.errors import DistutilsError
except ImportError:
from distutils.errors import DistutilsArgError as DistutilsError
+# pylint: enable=W0402
RENDERED_TMPD_PREFIX = "RENDERED_TEMPD"
VARIANT = None
@@ -113,11 +115,13 @@ def render_tmpl(template, mode=None):
VARIANT,
template,
fpath,
- ]
+ ],
+ check=True,
)
else:
subprocess.run(
- [sys.executable, "./tools/render-cloudcfg", template, fpath]
+ [sys.executable, "./tools/render-cloudcfg", template, fpath],
+ check=True,
)
if mode:
os.chmod(fpath, mode)
@@ -154,7 +158,6 @@ INITSYS_FILES = {
for f in glob("systemd/*")
if is_f(f) and is_generator(f)
],
- "upstart": [f for f in glob("upstart/*") if is_f(f)],
}
INITSYS_ROOTS = {
"sysvinit": "etc/rc.d/init.d",
@@ -167,7 +170,6 @@ INITSYS_ROOTS = {
"systemd.generators": pkg_config_read(
"systemd", "systemdsystemgeneratordir"
),
- "upstart": "etc/init/",
}
INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()])
@@ -202,7 +204,7 @@ class MyEggInfo(egg_info):
"""This makes sure to not include the rendered files in SOURCES.txt."""
def find_sources(self):
- ret = egg_info.find_sources(self)
+ egg_info.find_sources(self)
# update the self.filelist.
self.filelist.exclude_pattern(
RENDERED_TMPD_PREFIX + ".*", is_regex=True
@@ -216,7 +218,6 @@ class MyEggInfo(egg_info):
]
with open(mfname, "w") as fp:
fp.write("".join(files))
- return ret
# TODO: Is there a better way to do this??
diff --git a/systemd/cloud-config.service.tmpl b/systemd/cloud-config.service.tmpl
index 9d928ca2..d5568a6e 100644
--- a/systemd/cloud-config.service.tmpl
+++ b/systemd/cloud-config.service.tmpl
@@ -4,6 +4,10 @@ Description=Apply the settings specified in cloud-config
After=network-online.target cloud-config.target
After=snapd.seeded.service
Wants=network-online.target cloud-config.target
+{% if variant == "rhel" %}
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+{% endif %}
[Service]
Type=oneshot
diff --git a/systemd/cloud-config.target b/systemd/cloud-config.target
index ae9b7d02..2d65e343 100644
--- a/systemd/cloud-config.target
+++ b/systemd/cloud-config.target
@@ -1,9 +1,16 @@
-# cloud-init normally emits a "cloud-config" upstart event to inform third
-# parties that cloud-config is available, which does us no good when we're
-# using systemd. cloud-config.target serves as this synchronization point
-# instead. Services that would "start on cloud-config" with upstart can
-# instead use "After=cloud-config.target" and "Wants=cloud-config.target"
-# as appropriate.
+# cloud-config.target is the earliest synchronization point in cloud-init's
+# boot stages that indicates the completion of datasource detection (cloud-id)
+# and the presence of any meta-data, vendor-data and user-data.
+
+# Any services started at cloud-config.target will need to be aware that
+# cloud-init's config modules have not yet completed initial system
+# configuration, so there may be race conditions with ongoing cloud-init
+# setup, package installs or user-data scripts.
+
+# Services that need to start at this point should use the directives
+# "After=cloud-config.target" and "Wants=cloud-config.target" as appropriate.
+
+# To order services after all cloud-init operations, see cloud-init.target.
[Unit]
Description=Cloud-config availability
diff --git a/systemd/cloud-final.service.tmpl b/systemd/cloud-final.service.tmpl
index 8207b18c..85f423ac 100644
--- a/systemd/cloud-final.service.tmpl
+++ b/systemd/cloud-final.service.tmpl
@@ -7,6 +7,10 @@ After=multi-user.target
Before=apt-daily.service
{% endif %}
Wants=network-online.target cloud-config.service
+{% if variant == "rhel" %}
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+{% endif %}
[Service]
@@ -15,7 +19,16 @@ ExecStart=/usr/bin/cloud-init modules --mode=final
RemainAfterExit=yes
TimeoutSec=0
KillMode=process
+{% if variant == "rhel" %}
+# Restart NetworkManager if it is present and running.
+ExecStartPost=/bin/sh -c 'u=NetworkManager.service; \
+ out=$(systemctl show --property=SubState $u) || exit; \
+ [ "$out" = "SubState=running" ] || exit 0; \
+ systemctl reload-or-try-restart $u'
+{% else %}
TasksMax=infinity
+{% endif %}
+
# Output needs to appear in instance console output
StandardOutput=journal+console
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 74d47428..66893098 100644
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -5,28 +5,36 @@ set -f
LOG=""
DEBUG_LEVEL=1
LOG_D="/run/cloud-init"
+LOG_F="/run/cloud-init/cloud-init-generator.log"
ENABLE="enabled"
DISABLE="disabled"
-FOUND="found"
-NOTFOUND="notfound"
RUN_ENABLED_FILE="$LOG_D/$ENABLE"
+RUN_DISABLED_FILE="$LOG_D/$DISABLE"
+CLOUD_TARGET_NAME="cloud-init.target"
+# lxc sets 'container', but let's make that explicitly a global
+CONTAINER="${container}"
+
+# start: template section
{% if variant in ["suse"] %}
CLOUD_SYSTEM_TARGET="/usr/lib/systemd/system/cloud-init.target"
{% else %}
CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target"
{% endif %}
-CLOUD_TARGET_NAME="cloud-init.target"
-# lxc sets 'container', but lets make that explicitly a global
-CONTAINER="${container}"
+{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
+ "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
+ dsidentify="/usr/libexec/cloud-init/ds-identify"
+{% else %}
+ dsidentify="/usr/lib/cloud-init/ds-identify"
+{% endif %}
+# end: template section
debug() {
local lvl="$1"
shift
[ "$lvl" -gt "$DEBUG_LEVEL" ] && return
if [ -z "$LOG" ]; then
- local log="$LOG_D/${0##*/}.log"
{ [ -d "$LOG_D" ] || mkdir -p "$LOG_D"; } &&
- { : > "$log"; } >/dev/null 2>&1 && LOG="$log" ||
+ { : > "$LOG_F"; } >/dev/null 2>&1 && LOG="$LOG_F" ||
LOG="/dev/kmsg"
fi
echo "$@" >> "$LOG"
@@ -64,9 +72,9 @@ kernel_cmdline() {
# use KERNEL_CMDLINE if present in environment even if empty
cmdline=${KERNEL_CMDLINE}
debug 1 "kernel command line from env KERNEL_CMDLINE: $cmdline"
- elif read_proc_cmdline; then
- read_proc_cmdline && cmdline="$_RET"
- debug 1 "kernel command line ($_RET_MSG): $cmdline"
+ else
+ read_proc_cmdline && cmdline="$_RET" &&
+ debug 1 "kernel command line ($_RET_MSG): $cmdline"
fi
_RET="unset"
cmdline=" $cmdline "
@@ -83,26 +91,16 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
- "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
- local dsidentify="/usr/libexec/cloud-init/ds-identify"
-{% else %}
- local dsidentify="/usr/lib/cloud-init/ds-identify"
-{% endif %}
if [ ! -x "$dsidentify" ]; then
- debug 1 "no ds-identify in $dsidentify. _RET=$FOUND"
+ debug 1 "no ds-identify in $dsidentify"
return 0
fi
$dsidentify
ds_rc=$?
debug 1 "ds-identify rc=$ds_rc"
if [ "$ds_rc" = "0" ]; then
- _RET="$FOUND"
- debug 1 "ds-identify _RET=$_RET"
return 0
fi
- _RET="$NOTFOUND"
- debug 1 "ds-identify _RET=$_RET"
return 1
}
@@ -110,7 +108,7 @@ main() {
local normal_d="$1" early_d="$2" late_d="$3"
local target_name="multi-user.target" gen_d="$early_d"
local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}"
- local ds="$NOTFOUND"
+ local ds=""
debug 1 "$0 normal=$normal_d early=$early_d late=$late_d"
debug 2 "$0 $*"
@@ -133,8 +131,8 @@ main() {
if [ "$result" = "$ENABLE" ]; then
debug 1 "checking for datasource"
check_for_datasource
- ds=$_RET
- if [ "$ds" = "$NOTFOUND" ]; then
+ ds=$?
+ if [ "$ds" = "1" ]; then
debug 1 "cloud-init is enabled but no datasource found, disabling"
result="$DISABLE"
fi
@@ -154,6 +152,10 @@ main() {
"ln $CLOUD_SYSTEM_TARGET $link_path"
fi
fi
+ if [ -e $RUN_DISABLED_FILE ]; then
+ debug 1 "removing $RUN_DISABLED_FILE and creating $RUN_ENABLED_FILE"
+ rm -f $RUN_DISABLED_FILE
+ fi
: > "$RUN_ENABLED_FILE"
elif [ "$result" = "$DISABLE" ]; then
if [ -f "$link_path" ]; then
@@ -167,8 +169,10 @@ main() {
debug 1 "already disabled: no change needed [no $link_path]"
fi
if [ -e "$RUN_ENABLED_FILE" ]; then
+ debug 1 "removing $RUN_ENABLED_FILE and creating $RUN_DISABLED_FILE"
rm -f "$RUN_ENABLED_FILE"
fi
+ : > "$RUN_DISABLED_FILE"
else
debug 0 "unexpected result '$result' 'ds=$ds'"
ret=3
diff --git a/systemd/cloud-init-hotplugd.service b/systemd/cloud-init-hotplugd.service
index b64632ef..10962d54 100644
--- a/systemd/cloud-init-hotplugd.service
+++ b/systemd/cloud-init-hotplugd.service
@@ -12,6 +12,7 @@
[Unit]
Description=cloud-init hotplug hook daemon
After=cloud-init-hotplugd.socket
+Requires=cloud-init-hotplugd.socket
[Service]
Type=simple
diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl
index 7166f640..6f3f9d8d 100644
--- a/systemd/cloud-init-local.service.tmpl
+++ b/systemd/cloud-init-local.service.tmpl
@@ -1,25 +1,44 @@
## template:jinja
[Unit]
Description=Initial cloud-init job (pre-networking)
-{% if variant in ["ubuntu", "unknown", "debian"] %}
+{% if variant in ["ubuntu", "unknown", "debian", "rhel" ] %}
DefaultDependencies=no
{% endif %}
Wants=network-pre.target
After=hv_kvp_daemon.service
After=systemd-remount-fs.service
+{% if variant == "rhel" %}
+Requires=dbus.socket
+After=dbus.socket
+{% endif %}
Before=NetworkManager.service
+{% if variant == "rhel" %}
+Before=network.service
+{% endif %}
Before=network-pre.target
Before=shutdown.target
+{% if variant == "rhel" %}
+Before=firewalld.target
+Conflicts=shutdown.target
+{% endif %}
{% if variant in ["ubuntu", "unknown", "debian"] %}
Before=sysinit.target
Conflicts=shutdown.target
{% endif %}
RequiresMountsFor=/var/lib/cloud
+{% if variant == "rhel" %}
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+{% endif %}
[Service]
Type=oneshot
+{% if variant == "rhel" %}
+ExecStartPre=/bin/mkdir -p /run/cloud-init
+ExecStartPre=/sbin/restorecon /run/cloud-init
+ExecStartPre=/usr/bin/touch /run/cloud-init/enabled
+{% endif %}
ExecStart=/usr/bin/cloud-init init --local
-ExecStart=/bin/touch /run/cloud-init/network-config-ready
RemainAfterExit=yes
TimeoutSec=0
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index e71e5679..c170aef7 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -1,7 +1,7 @@
## template:jinja
[Unit]
Description=Initial cloud-init job (metadata service crawler)
-{% if variant not in ["photon"] %}
+{% if variant not in ["photon", "rhel"] %}
DefaultDependencies=no
{% endif %}
Wants=cloud-init-local.service
@@ -36,6 +36,10 @@ Before=shutdown.target
Conflicts=shutdown.target
{% endif %}
Before=systemd-user-sessions.service
+{% if variant == "rhel" %}
+ConditionPathExists=!/etc/cloud/cloud-init.disabled
+ConditionKernelCommandLine=!cloud-init=disabled
+{% endif %}
[Service]
Type=oneshot
diff --git a/systemd/cloud-init.target b/systemd/cloud-init.target
index 083c3b6f..760dfee5 100644
--- a/systemd/cloud-init.target
+++ b/systemd/cloud-init.target
@@ -1,7 +1,12 @@
-# cloud-init target is enabled by cloud-init-generator
+# cloud-init.target is enabled by cloud-init-generator
# To disable it you can either:
# a.) boot with kernel cmdline of 'cloud-init=disabled'
# b.) touch a file /etc/cloud/cloud-init.disabled
+#
+# cloud-init.target is a synchronization point reached once all of
+# cloud-init's initial system configuration tasks have completed. To order a
+# service after cloud-init is done, add the directives as applicable:
+# After=cloud-init.target and Wants=cloud-init.target
[Unit]
Description=Cloud-init target
After=multi-user.target
diff --git a/templates/hosts.arch.tmpl b/templates/hosts.arch.tmpl
new file mode 100644
index 00000000..b80ba61e
--- /dev/null
+++ b/templates/hosts.arch.tmpl
@@ -0,0 +1,23 @@
+## template:jinja
+{#
+This file (/etc/cloud/templates/hosts.arch.tmpl) is only utilized
+if enabled in cloud-config. Specifically, in order to enable it
+you need to add the following to config:
+ manage_etc_hosts: True
+-#}
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/hosts.arch.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+{# The value '{{hostname}}' will be replaced with the local-hostname -#}
+127.0.1.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost
+
+# The following lines are desirable for IPv6 capable hosts
+::1 localhost ip6-localhost ip6-loopback
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
+
diff --git a/test-requirements.txt b/test-requirements.txt
index 06dfbbec..df4ad0ff 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,9 @@
httpretty>=0.7.1
pytest
pytest-cov
+pytest-mock
# Only really needed on older versions of python
setuptools
jsonschema
+responses
diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py
deleted file mode 100644
index e98ab5d0..00000000
--- a/tests/integration_tests/bugs/test_gh570.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""Integration test for #570.
-
-Test that we can add optional vendor-data to the seedfrom file in a
-NoCloud environment
-"""
-
-import pytest
-
-from tests.integration_tests.instances import IntegrationInstance
-
-VENDOR_DATA = """\
-#cloud-config
-runcmd:
- - touch /var/tmp/seeded_vendordata_test_file
-"""
-
-
-# Only running on LXD because we need NoCloud for this test
-@pytest.mark.lxd_container
-@pytest.mark.lxd_vm
-def test_nocloud_seedfrom_vendordata(client: IntegrationInstance):
- seed_dir = "/var/tmp/test_seed_dir"
- result = client.execute(
- "mkdir {seed_dir} && "
- "touch {seed_dir}/user-data && "
- "touch {seed_dir}/meta-data && "
- "echo 'seedfrom: {seed_dir}/' > "
- "/var/lib/cloud/seed/nocloud-net/meta-data".format(seed_dir=seed_dir)
- )
- assert result.return_code == 0
-
- client.write_to_file(
- "{}/vendor-data".format(seed_dir),
- VENDOR_DATA,
- )
- client.execute("cloud-init clean --logs")
- client.restart()
- assert client.execute("cloud-init status").ok
- assert "seeded_vendordata_test_file" in client.execute("ls /var/tmp")
diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py
index c7a897c6..d83d244b 100644
--- a/tests/integration_tests/bugs/test_gh632.py
+++ b/tests/integration_tests/bugs/test_gh632.py
@@ -15,7 +15,7 @@ from tests.integration_tests.util import verify_clean_log
def test_datasource_rbx_no_stacktrace(client: IntegrationInstance):
client.write_to_file(
"/etc/cloud/cloud.cfg.d/90_dpkg.cfg",
- "datasource_list: [ RbxCloud, NoCloud ]\n",
+ "datasource_list: [ RbxCloud, NoCloud, LXD ]\n",
)
client.write_to_file(
"/etc/cloud/ds-identify.cfg",
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 14eaf9c3..0e2e1deb 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -5,6 +5,7 @@ import os.path
import random
import string
from abc import ABC, abstractmethod
+from copy import deepcopy
from typing import Optional, Type
from uuid import UUID
@@ -206,9 +207,18 @@ class Ec2Cloud(IntegrationCloud):
def _perform_launch(self, launch_kwargs, **kwargs):
"""Use a dual-stack VPC for cloud-init integration testing."""
- launch_kwargs["vpc"] = self.cloud_instance.get_or_create_vpc(
- name="ec2-cloud-init-integration"
- )
+ if "vpc" not in launch_kwargs:
+ launch_kwargs["vpc"] = self.cloud_instance.get_or_create_vpc(
+ name="ec2-cloud-init-integration"
+ )
+ # Enable IPv6 metadata at http://[fd00:ec2::254]
+ if "Ipv6AddressCount" not in launch_kwargs:
+ launch_kwargs["Ipv6AddressCount"] = 1
+ if "MetadataOptions" not in launch_kwargs:
+ launch_kwargs["MetadataOptions"] = {}
+ if "HttpProtocolIpv6" not in launch_kwargs["MetadataOptions"]:
+ launch_kwargs["MetadataOptions"] = {"HttpProtocolIpv6": "enabled"}
+
pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
return pycloudlib_instance
@@ -291,12 +301,15 @@ class _LxdIntegrationCloud(IntegrationCloud):
subp(command.split())
def _perform_launch(self, launch_kwargs, **kwargs):
- launch_kwargs["inst_type"] = launch_kwargs.pop("instance_type", None)
- wait = launch_kwargs.pop("wait", True)
- release = launch_kwargs.pop("image_id")
+ instance_kwargs = deepcopy(launch_kwargs)
+ instance_kwargs["inst_type"] = instance_kwargs.pop(
+ "instance_type", None
+ )
+ wait = instance_kwargs.pop("wait", True)
+ release = instance_kwargs.pop("image_id")
try:
- profile_list = launch_kwargs["profile_list"]
+ profile_list = instance_kwargs["profile_list"]
except KeyError:
profile_list = self._get_or_set_profile_list(release)
@@ -305,10 +318,10 @@ class _LxdIntegrationCloud(IntegrationCloud):
random.choices(string.ascii_lowercase + string.digits, k=8)
)
pycloudlib_instance = self.cloud_instance.init(
- launch_kwargs.pop("name", default_name),
+ instance_kwargs.pop("name", default_name),
release,
profile_list=profile_list,
- **launch_kwargs,
+ **instance_kwargs,
)
if self.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
self._mount_source(pycloudlib_instance)
diff --git a/tests/integration_tests/cmd/test_status.py b/tests/integration_tests/cmd/test_status.py
new file mode 100644
index 00000000..ced883fd
--- /dev/null
+++ b/tests/integration_tests/cmd/test_status.py
@@ -0,0 +1,69 @@
+"""Tests for `cloud-init status`"""
+from time import sleep
+
+import pytest
+
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
+from tests.integration_tests.instances import IntegrationInstance
+
+
+# We're implementing our own here in case cloud-init status --wait
+# isn't working correctly (LP: #1966085)
+def _wait_for_cloud_init(client: IntegrationInstance):
+ last_exception = None
+ for _ in range(30):
+ try:
+ result = client.execute("cloud-init status")
+ if (
+ result
+ and result.ok
+ and ("running" not in result or "not run" not in result)
+ ):
+ return result
+ except Exception as e:
+ last_exception = e
+ sleep(1)
+ raise Exception(
+ "cloud-init status did not return successfully."
+ ) from last_exception
+
+
+def _remove_nocloud_dir_and_reboot(client: IntegrationInstance):
+ # On Impish and below, NoCloud will be detected on an LXD container.
+ # If we remove this directory, it will no longer be detected.
+ client.execute("rm -rf /var/lib/cloud/seed/nocloud-net")
+ client.execute("cloud-init clean --logs --reboot")
+
+
+@pytest.mark.ubuntu
+@pytest.mark.lxd_container
+def test_wait_when_no_datasource(session_cloud: IntegrationCloud, setup_image):
+ """Ensure that when no datasource is found, we get status: disabled
+
+ LP: #1966085
+ """
+ with session_cloud.launch(
+ launch_kwargs={
+            # On Jammy and above, we detect the LXD datasource using a
+            # socket available to the container. Setting security.devlxd
+            # to False prevents the socket from being exposed in the
+            # container, causing datasource detection to fail. ds-identify
+            # will then have failed to detect a datasource.
+ "config_dict": {"security.devlxd": False},
+ "wait": False, # to prevent cloud-init status --wait
+ }
+ ) as client:
+ # We know this will be an LXD instance due to our pytest mark
+ client.instance.execute_via_ssh = False # type: ignore
+ # No ubuntu user if cloud-init didn't run
+ client.instance.username = "root"
+ # Jammy and above will use LXD datasource by default
+ if ImageSpecification.from_os_image().release in [
+ "bionic",
+ "focal",
+ "impish",
+ ]:
+ _remove_nocloud_dir_and_reboot(client)
+ status_out = _wait_for_cloud_init(client).stdout.strip()
+ assert "status: disabled" in status_out
+ assert "Cloud-init disabled by cloud-init-generator" in status_out
+ assert client.execute("cloud-init status --wait").ok
diff --git a/tests/integration_tests/datasources/test_ec2_ipv6.py b/tests/integration_tests/datasources/test_ec2_ipv6.py
new file mode 100644
index 00000000..8cde4dc9
--- /dev/null
+++ b/tests/integration_tests/datasources/test_ec2_ipv6.py
@@ -0,0 +1,43 @@
+import re
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _test_crawl(client, ip):
+ assert client.execute("cloud-init clean --logs").ok
+ assert client.execute("cloud-init init --local").ok
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert f"Using metadata source: '{ip}'" in log
+ result = re.findall(
+ r"Crawl of metadata service took (\d+.\d+) seconds", log
+ )
+ if len(result) != 1:
+ pytest.fail(f"Expected 1 metadata crawl time, got {result}")
+    # 20 seconds would still be a very long time for the metadata service
+    # crawl, but it's short enough to know we're not hanging on a response
+ assert float(result[0]) < 20
+
+
+@pytest.mark.ec2
+def test_dual_stack(client: IntegrationInstance):
+ # Drop IPv4 responses
+ assert client.execute("iptables -I INPUT -s 169.254.169.254 -j DROP").ok
+ _test_crawl(client, "http://[fd00:ec2::254]")
+
+ # Block IPv4 requests
+ assert client.execute("iptables -I OUTPUT -d 169.254.169.254 -j REJECT").ok
+ _test_crawl(client, "http://[fd00:ec2::254]")
+
+ # Re-enable IPv4
+ assert client.execute("iptables -D OUTPUT -d 169.254.169.254 -j REJECT").ok
+ assert client.execute("iptables -D INPUT -s 169.254.169.254 -j DROP").ok
+
+ # Drop IPv6 responses
+ assert client.execute("ip6tables -I INPUT -s fd00:ec2::254 -j DROP").ok
+ _test_crawl(client, "http://169.254.169.254")
+
+ # Block IPv6 requests
+ assert client.execute("ip6tables -I OUTPUT -d fd00:ec2::254 -j REJECT").ok
+ _test_crawl(client, "http://169.254.169.254")
diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py
index eb2a4cf2..f72b1b4b 100644
--- a/tests/integration_tests/datasources/test_lxd_discovery.py
+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
@@ -9,24 +9,53 @@ from tests.integration_tests.util import verify_clean_log
def _customize_envionment(client: IntegrationInstance):
+ # Assert our platform can detect LXD during systemd generator timeframe.
+ ds_id_log = client.execute("cat /run/cloud-init/ds-identify.log").stdout
+ assert "check for 'LXD' returned found" in ds_id_log
+
+ if client.settings.PLATFORM == "lxd_vm":
+        # ds-identify runs at systemd generator time, before /dev/lxd/sock
+        # exists. Assert we see the expected artifact which indicates LXD
+        # is viable.
+ result = client.execute("cat /sys/class/dmi/id/board_name")
+ if not result.ok:
+ raise AssertionError(
+ "Missing expected /sys/class/dmi/id/board_name"
+ )
+ if "LXD" != result.stdout:
+ raise AssertionError(f"DMI board_name is not LXD: {result.stdout}")
+
+ # Having multiple datasources prevents ds-identify from short-circuiting
+ # detection logic with a log like:
+ # single entry in datasource_list (LXD) use that.
+ # Also, NoCloud is detected during init-local timeframe.
+
+    # If there is a race on VMs where /dev/lxd/sock is not set up in
+    # init-local, cloud-init will fall back to NoCloud and fail this test.
client.write_to_file(
- "/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg",
- "datasource_list: [LXD]\n",
+ "/etc/cloud/cloud.cfg.d/99-detect-lxd-first.cfg",
+ "datasource_list: [LXD, NoCloud]\n",
)
+ # This is also to ensure that NoCloud can be detected
+ if ImageSpecification.from_os_image().release == "jammy":
+ # Add nocloud-net seed files because Jammy no longer delivers NoCloud
+ # (LP: #1958460).
+ client.execute("mkdir -p /var/lib/cloud/seed/nocloud-net")
+ client.write_to_file("/var/lib/cloud/seed/nocloud-net/meta-data", "")
+ client.write_to_file(
+ "/var/lib/cloud/seed/nocloud-net/user-data", "#cloud-config\n{}"
+ )
client.execute("cloud-init clean --logs")
client.restart()
-# This test should be able to work on any cloud whose datasource specifies
-# a NETWORK dependency
@pytest.mark.lxd_container
@pytest.mark.lxd_vm
@pytest.mark.ubuntu # Because netplan
def test_lxd_datasource_discovery(client: IntegrationInstance):
"""Test that DataSourceLXD is detected instead of NoCloud."""
+
_customize_envionment(client)
- nic_dev = "enp5s0" if client.settings.PLATFORM == "lxd_vm" else "eth0"
- result = client.execute("cloud-init status --long")
+ result = client.execute("cloud-init status --wait --long")
if not result.ok:
raise AssertionError("cloud-init failed:\n%s", result.stderr)
if "DataSourceLXD" not in result.stdout:
@@ -35,6 +64,9 @@ def test_lxd_datasource_discovery(client: IntegrationInstance):
)
netplan_yaml = client.execute("cat /etc/netplan/50-cloud-init.yaml")
netplan_cfg = yaml.safe_load(netplan_yaml)
+
+ platform = client.settings.PLATFORM
+ nic_dev = "eth0" if platform == "lxd_container" else "enp5s0"
assert {
"network": {"ethernets": {nic_dev: {"dhcp4": True}}, "version": 2}
} == netplan_cfg
@@ -76,15 +108,21 @@ def test_lxd_datasource_discovery(client: IntegrationInstance):
yaml.safe_load(ds_cfg["config"]["user.meta-data"])
)
assert "#cloud-config\ninstance-id" in ds_cfg["meta-data"]
- # Assert NoCloud seed data is still present in cloud image metadata
- # This will start failing if we redact metadata templates from
- # https://cloud-images.ubuntu.com/daily/server/jammy/current/\
- # jammy-server-cloudimg-amd64-lxd.tar.xz
- nocloud_metadata = yaml.safe_load(
- client.read_from_file("/var/lib/cloud/seed/nocloud-net/meta-data")
- )
- assert client.instance.name == nocloud_metadata["instance-id"]
- assert (
- nocloud_metadata["instance-id"] == nocloud_metadata["local-hostname"]
- )
- assert v1["public_ssh_keys"][0] == nocloud_metadata["public-keys"]
+
+    # Jammy no longer provides nocloud-net seed files (LP: #1958460)
+ if ImageSpecification.from_os_image().release in [
+ "bionic",
+ "focal",
+ "impish",
+ ]:
+ # Assert NoCloud seed files are still present in non-Jammy images
+ # and that NoCloud seed files provide the same content as LXD socket.
+ nocloud_metadata = yaml.safe_load(
+ client.read_from_file("/var/lib/cloud/seed/nocloud-net/meta-data")
+ )
+ assert client.instance.name == nocloud_metadata["instance-id"]
+ assert (
+ nocloud_metadata["instance-id"]
+ == nocloud_metadata["local-hostname"]
+ )
+ assert v1["public_ssh_keys"][0] == nocloud_metadata["public-keys"]
diff --git a/tests/integration_tests/datasources/test_nocloud.py b/tests/integration_tests/datasources/test_nocloud.py
new file mode 100644
index 00000000..d9410410
--- /dev/null
+++ b/tests/integration_tests/datasources/test_nocloud.py
@@ -0,0 +1,87 @@
+"""NoCloud datasource integration tests."""
+import pytest
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+
+VENDOR_DATA = """\
+#cloud-config
+runcmd:
+ - touch /var/tmp/seeded_vendordata_test_file
+"""
+
+
+LXD_METADATA_NOCLOUD_SEED = """\
+ /var/lib/cloud/seed/nocloud-net/meta-data:
+ when:
+ - create
+ - copy
+ create_only: false
+ template: emptycfg.tpl
+ properties:
+ default: |
+ #cloud-config
+ {}
+ /var/lib/cloud/seed/nocloud-net/user-data:
+ when:
+ - create
+ - copy
+ create_only: false
+ template: emptycfg.tpl
+ properties:
+ default: |
+ #cloud-config
+ {}
+"""
+
+
+def setup_nocloud(instance: LXDInstance):
+ # On Jammy and above, LXD no longer uses NoCloud, so we need to set
+ # it up manually
+ lxd_image_metadata = subp(
+ ["lxc", "config", "metadata", "show", instance.name]
+ )
+ if "/var/lib/cloud/seed/nocloud-net" in lxd_image_metadata.stdout:
+ return
+ subp(
+ ["lxc", "config", "template", "create", instance.name, "emptycfg.tpl"],
+ )
+ subp(
+ ["lxc", "config", "template", "edit", instance.name, "emptycfg.tpl"],
+ data="#cloud-config\n{}\n",
+ )
+ subp(
+ ["lxc", "config", "metadata", "edit", instance.name],
+ data=f"{lxd_image_metadata.stdout}{LXD_METADATA_NOCLOUD_SEED}",
+ )
+
+
+# Only running on LXD container because we need NoCloud with custom setup
+@pytest.mark.lxd_container
+@pytest.mark.lxd_setup.with_args(setup_nocloud)
+@pytest.mark.lxd_use_exec
+def test_nocloud_seedfrom_vendordata(client: IntegrationInstance):
+ """Integration test for #570.
+
+ Test that we can add optional vendor-data to the seedfrom file in a
+ NoCloud environment
+ """
+ seed_dir = "/var/tmp/test_seed_dir"
+ result = client.execute(
+ "mkdir {seed_dir} && "
+ "touch {seed_dir}/user-data && "
+ "touch {seed_dir}/meta-data && "
+ "echo 'seedfrom: {seed_dir}/' > "
+ "/var/lib/cloud/seed/nocloud-net/meta-data".format(seed_dir=seed_dir)
+ )
+ assert result.return_code == 0
+
+ client.write_to_file(
+ "{}/vendor-data".format(seed_dir),
+ VENDOR_DATA,
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+ assert client.execute("cloud-init status").ok
+ assert "seeded_vendordata_test_file" in client.execute("ls /var/tmp")
diff --git a/tests/integration_tests/decorators.py b/tests/integration_tests/decorators.py
new file mode 100644
index 00000000..1c7e6046
--- /dev/null
+++ b/tests/integration_tests/decorators.py
@@ -0,0 +1,34 @@
+import functools
+import time
+
+
+def retry(*, tries: int = 30, delay: int = 1):
+ """Decorator for retries.
+
+ Retry a function until code no longer raises an exception or
+ max tries is reached.
+
+ Example:
+ @retry(tries=5, delay=1)
+ def try_something_that_may_not_be_ready():
+ ...
+ """
+
+ def _retry(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ last_error = None
+ for _ in range(tries):
+ try:
+ func(*args, **kwargs)
+ break
+ except Exception as e:
+ last_error = e
+ time.sleep(delay)
+ else:
+ if last_error:
+ raise last_error
+
+ return wrapper
+
+ return _retry
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index e26ee233..65cd977a 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -9,7 +9,7 @@ from pycloudlib.instance import BaseInstance
from pycloudlib.result import Result
from tests.integration_tests import integration_settings
-from tests.integration_tests.util import retry
+from tests.integration_tests.decorators import retry
try:
from typing import TYPE_CHECKING
diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py
index baaa7567..e878176f 100644
--- a/tests/integration_tests/modules/test_cli.py
+++ b/tests/integration_tests/modules/test_cli.py
@@ -28,11 +28,11 @@ apt_pipelining: bogus
@pytest.mark.user_data(VALID_USER_DATA)
def test_valid_userdata(client: IntegrationInstance):
- """Test `cloud-init devel schema` with valid userdata.
+ """Test `cloud-init schema` with valid userdata.
PR #575
"""
- result = client.execute("cloud-init devel schema --system")
+ result = client.execute("cloud-init schema --system")
assert result.ok
assert "Valid cloud-config: system userdata" == result.stdout.strip()
result = client.execute("cloud-init status --long")
@@ -44,11 +44,11 @@ def test_valid_userdata(client: IntegrationInstance):
@pytest.mark.user_data(INVALID_USER_DATA_HEADER)
def test_invalid_userdata(client: IntegrationInstance):
- """Test `cloud-init devel schema` with invalid userdata.
+ """Test `cloud-init schema` with invalid userdata.
PR #575
"""
- result = client.execute("cloud-init devel schema --system")
+ result = client.execute("cloud-init schema --system")
assert not result.ok
assert "Cloud config schema errors" in result.stderr
assert 'needs to begin with "#cloud-config"' in result.stderr
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 05499580..70850fd9 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -5,15 +5,20 @@ of the test would be unlikely to affect the running of another test using
the same instance launch. Most independent module coherence tests can go
here.
"""
+import glob
+import importlib
import json
import re
+import uuid
+from pathlib import Path
import pytest
+import cloudinit.config
from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.decorators import retry
from tests.integration_tests.instances import IntegrationInstance
from tests.integration_tests.util import (
- retry,
verify_clean_log,
verify_ordered_items_in_text,
)
@@ -58,7 +63,6 @@ runcmd:
- #
- logger "My test log"
snap:
- squashfuse_in_container: true
commands:
- snap install hello-world
ssh_import_id:
@@ -145,23 +149,6 @@ class TestCombined:
client = class_client
assert "hello world" == client.read_from_file("/var/tmp/runcmd_output")
- @retry(tries=30, delay=1)
- def test_ssh_import_id(self, class_client: IntegrationInstance):
- """Integration test for the ssh_import_id module.
-
- This test specifies ssh keys to be imported by the ``ssh_import_id``
- module and then checks that if the ssh keys were successfully imported.
-
- TODO:
- * This test assumes that SSH keys will be imported into the
- /home/ubuntu; this will need modification to run on other OSes.
- """
- client = class_client
- ssh_output = client.read_from_file("/home/ubuntu/.ssh/authorized_keys")
-
- assert "# ssh-import-id gh:powersj" in ssh_output
- assert "# ssh-import-id lp:smoser" in ssh_output
-
def test_snap(self, class_client: IntegrationInstance):
"""Integration test for the snap module.
@@ -208,7 +195,15 @@ class TestCombined:
parsed_datasource = json.loads(status_file)["v1"]["datasource"]
if client.settings.PLATFORM in ["lxd_container", "lxd_vm"]:
- assert parsed_datasource.startswith("DataSourceNoCloud")
+ if ImageSpecification.from_os_image().release in [
+ "bionic",
+ "focal",
+ "impish",
+ ]:
+ datasource = "DataSourceNoCloud"
+ else:
+ datasource = "DataSourceLXD"
+ assert parsed_datasource.startswith(datasource)
else:
platform_datasources = {
"azure": "DataSourceAzure [seed=/dev/sr0]",
@@ -232,11 +227,32 @@ class TestCombined:
class_client.execute("stat -c %N /run/cloud-init/cloud-id")
)
+ def test_run_frequency(self, class_client: IntegrationInstance):
+ log = class_client.read_from_file("/var/log/cloud-init.log")
+ config_dir = Path(cloudinit.config.__file__).parent
+ module_paths = glob.glob(str(config_dir / "cc*.py"))
+ module_names = [Path(x).stem for x in module_paths]
+ found_count = 0
+ for name in module_names:
+ mod = importlib.import_module(f"cloudinit.config.{name}")
+ frequency = mod.meta["frequency"]
+ # cc_ gets replaced with config- in logs
+ log_name = name.replace("cc_", "config-")
+            # Some modules have been filtered out in /etc/cloud/cloud.cfg,
+            # so not every module name will appear in the log.
+ if f"running {log_name}" in log:
+ found_count += 1 # Ensure we're matching on the right text
+ assert f"running {log_name} with frequency {frequency}" in log
+ assert (
+ found_count > 10
+ ), "Not enough modules found in log. Did the log message change?"
+ assert "with frequency None" not in log
+
def _check_common_metadata(self, data):
assert data["base64_encoded_keys"] == []
assert data["merged_cfg"] == "redacted for non-root user"
image_spec = ImageSpecification.from_os_image()
assert data["sys_info"]["dist"][0] == image_spec.os
v1_data = data["v1"]
@@ -245,7 +261,7 @@ class TestCombined:
assert v1_data["distro"] == image_spec.os
assert v1_data["distro_release"] == image_spec.release
assert v1_data["machine"] == "x86_64"
- assert re.match(r"3.\d\.\d", v1_data["python_version"])
+ assert re.match(r"3.\d+\.\d+", v1_data["python_version"])
@pytest.mark.lxd_container
def test_instance_json_lxd(self, class_client: IntegrationInstance):
@@ -257,18 +273,33 @@ class TestCombined:
data = json.loads(instance_json_file)
self._check_common_metadata(data)
v1_data = data["v1"]
- assert v1_data["cloud_name"] == "unknown"
+ if ImageSpecification.from_os_image().release not in [
+ "bionic",
+ "focal",
+ "impish",
+ ]:
+ cloud_name = "lxd"
+ subplatform = "LXD socket API v. 1.0 (/dev/lxd/sock)"
+ # instance-id should be a UUID
+ try:
+ uuid.UUID(v1_data["instance_id"])
+            except ValueError as e:
+                raise AssertionError(
+                    f"LXD instance-id is not a UUID: {v1_data['instance_id']}"
+                ) from e
+ else:
+ cloud_name = "unknown"
+ subplatform = "seed-dir (/var/lib/cloud/seed/nocloud-net)"
+ # Pre-Jammy instance-id and instance.name are synonymous
+ assert v1_data["instance_id"] == client.instance.name
+ assert v1_data["cloud_name"] == cloud_name
+ assert v1_data["subplatform"] == subplatform
assert v1_data["platform"] == "lxd"
assert v1_data["cloud_id"] == "lxd"
assert f"{v1_data['cloud_id']}" == client.read_from_file(
"/run/cloud-init/cloud-id-lxd"
)
- assert (
- v1_data["subplatform"]
- == "seed-dir (/var/lib/cloud/seed/nocloud-net)"
- )
assert v1_data["availability_zone"] is None
- assert v1_data["instance_id"] == client.instance.name
assert v1_data["local_hostname"] == client.instance.name
assert v1_data["region"] is None
@@ -282,20 +313,40 @@ class TestCombined:
data = json.loads(instance_json_file)
self._check_common_metadata(data)
v1_data = data["v1"]
- assert v1_data["cloud_name"] == "unknown"
+ if ImageSpecification.from_os_image().release not in [
+ "bionic",
+ "focal",
+ "impish",
+ ]:
+ cloud_name = "lxd"
+ subplatform = "LXD socket API v. 1.0 (/dev/lxd/sock)"
+ # instance-id should be a UUID
+ try:
+ uuid.UUID(v1_data["instance_id"])
+ except ValueError as e:
+ raise AssertionError(
+ f"LXD instance-id is not a UUID: {v1_data['instance_id']}"
+ ) from e
+ assert v1_data["subplatform"] == subplatform
+ else:
+ cloud_name = "unknown"
+ # Pre-Jammy instance-id and instance.name are synonymous
+ assert v1_data["instance_id"] == client.instance.name
+ assert any(
+ [
+ "/var/lib/cloud/seed/nocloud-net"
+ in v1_data["subplatform"],
+ "/dev/sr0" in v1_data["subplatform"],
+ ]
+ )
+ assert v1_data["cloud_name"] == cloud_name
assert v1_data["platform"] == "lxd"
assert v1_data["cloud_id"] == "lxd"
assert f"{v1_data['cloud_id']}" == client.read_from_file(
"/run/cloud-init/cloud-id-lxd"
)
- assert any(
- [
- "/var/lib/cloud/seed/nocloud-net" in v1_data["subplatform"],
- "/dev/sr0" in v1_data["subplatform"],
- ]
- )
+
assert v1_data["availability_zone"] is None
- assert v1_data["instance_id"] == client.instance.name
assert v1_data["local_hostname"] == client.instance.name
assert v1_data["region"] is None
@@ -340,3 +391,23 @@ class TestCombined:
assert v1_data["availability_zone"] == client.instance.zone
assert v1_data["instance_id"] == client.instance.instance_id
assert v1_data["local_hostname"] == client.instance.name
+
+
+@pytest.mark.user_data(USER_DATA)
+class TestCombinedNoCI:
+ @retry(tries=30, delay=1)
+ def test_ssh_import_id(self, class_client: IntegrationInstance):
+ """Integration test for the ssh_import_id module.
+
+ This test specifies ssh keys to be imported by the ``ssh_import_id``
+ module and then checks that if the ssh keys were successfully imported.
+
+ TODO:
+ * This test assumes that SSH keys will be imported into the
+ /home/ubuntu; this will need modification to run on other OSes.
+ """
+ client = class_client
+ ssh_output = client.read_from_file("/home/ubuntu/.ssh/authorized_keys")
+
+ assert "# ssh-import-id gh:powersj" in ssh_output
+ assert "# ssh-import-id lp:smoser" in ssh_output
diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py
index 7aaba7db..1aa22ef0 100644
--- a/tests/integration_tests/modules/test_disk_setup.py
+++ b/tests/integration_tests/modules/test_disk_setup.py
@@ -170,6 +170,13 @@ class TestPartProbeAvailability:
log = client.read_from_file("/var/log/cloud-init.log")
self._verify_first_disk_setup(client, log)
+ # Ensure NoCloud gets detected on reboot
+ client.execute("mkdir -p /var/lib/cloud/seed/nocloud-net/")
+ client.execute("touch /var/lib/cloud/seed/nocloud-net/meta-data")
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/99_nocloud.cfg",
+ "datasource_list: [ NoCloud ]\n",
+ )
# Update our userdata and cloud.cfg to mount then perform new disk
# setup
client.write_to_file(
diff --git a/tests/integration_tests/modules/test_frequency_override.py b/tests/integration_tests/modules/test_frequency_override.py
new file mode 100644
index 00000000..0cefadcc
--- /dev/null
+++ b/tests/integration_tests/modules/test_frequency_override.py
@@ -0,0 +1,33 @@
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+runcmd:
+ - echo "hi" >> /var/tmp/hi
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_frequency_override(client: IntegrationInstance):
+ # Some pre-checks
+ assert (
+ "running config-scripts-user with frequency once-per-instance"
+ in client.read_from_file("/var/log/cloud-init.log")
+ )
+ assert client.read_from_file("/var/tmp/hi").strip().count("hi") == 1
+
+ # Change frequency of scripts-user to always
+ config = client.read_from_file("/etc/cloud/cloud.cfg")
+ new_config = config.replace("- scripts-user", "- [ scripts-user, always ]")
+ client.write_to_file("/etc/cloud/cloud.cfg", new_config)
+
+ client.restart()
+
+ # Ensure the script was run again
+ assert (
+ "running config-scripts-user with frequency always"
+ in client.read_from_file("/var/log/cloud-init.log")
+ )
+ assert client.read_from_file("/var/tmp/hi").strip().count("hi") == 2
diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
index 50899982..5e2b3645 100644
--- a/tests/integration_tests/modules/test_keys_to_console.py
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -4,7 +4,9 @@
``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)"""
import pytest
-from tests.integration_tests.util import retry
+from tests.integration_tests.decorators import retry
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import get_console_log
BLACKLIST_USER_DATA = """\
#cloud-config
@@ -85,29 +87,15 @@ class TestKeysToConsoleDisabled:
@pytest.mark.user_data(ENABLE_KEYS_TO_CONSOLE_USER_DATA)
+@retry(tries=30, delay=1)
@pytest.mark.ec2
@pytest.mark.lxd_container
@pytest.mark.oci
@pytest.mark.openstack
-class TestKeysToConsoleEnabled:
+# No Azure because no console log on Azure
+def test_duplicate_messaging_console_log(client: IntegrationInstance):
"""Test that output can be enabled disabled."""
-
- def test_duplicate_messaging_console_log(self, class_client):
- class_client.execute("cloud-init status --wait --long").ok
- try:
- console_log = class_client.instance.console_log()
- except NotImplementedError:
- # Assume that an exception here means that we can't use the console
- # log
- pytest.skip("NotImplementedError when requesting console log")
- return
- if console_log.lower() == "no console output":
- # This test retries because we might not have the full console log
- # on the first fetch. However, if we have no console output
- # at all, we don't want to keep retrying as that would trigger
- # another 5 minute wait on the pycloudlib side, which could
- # leave us waiting for a couple hours
- pytest.fail("no console output")
- return
- msg = "no authorized SSH keys fingerprints found for user barfoo."
- assert 1 == console_log.count(msg)
+ assert (
+ "no authorized SSH keys fingerprints found for user barfoo."
+ in get_console_log(client)
+ )
diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py
index 33527e1e..9979cc06 100644
--- a/tests/integration_tests/modules/test_persistence.py
+++ b/tests/integration_tests/modules/test_persistence.py
@@ -26,7 +26,7 @@ def test_log_message_on_missing_version_file(client: IntegrationInstance):
"attribute 'policy'. Ignoring current cache.",
"no cache found",
"Searching for local data source",
- "SUCCESS: found local data from DataSourceNoCloud",
+ r"SUCCESS: found local data from DataSource(NoCloud|LXD)",
],
log,
)
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
index 0e35cd26..66ea52dd 100644
--- a/tests/integration_tests/modules/test_set_password.py
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -11,7 +11,8 @@ only specify one user-data per instance.
import pytest
import yaml
-from tests.integration_tests.util import retry
+from tests.integration_tests.decorators import retry
+from tests.integration_tests.util import get_console_log
COMMON_USER_DATA = """\
#cloud-config
@@ -137,21 +138,7 @@ class Mixin:
@retry(tries=30, delay=1)
def test_random_passwords_emitted_to_serial_console(self, class_client):
"""We should emit passwords to the serial console. (LP: #1918303)"""
- try:
- console_log = class_client.instance.console_log()
- except NotImplementedError:
- # Assume that an exception here means that we can't use the console
- # log
- pytest.skip("NotImplementedError when requesting console log")
- return
- if console_log.lower() == "no console output":
- # This test retries because we might not have the full console log
- # on the first fetch. However, if we have no console output
- # at all, we don't want to keep retrying as that would trigger
- # another 5 minute wait on the pycloudlib side, which could
- # leave us waiting for a couple hours
- pytest.fail("no console output")
- return
+ console_log = get_console_log(class_client)
assert "dick:" in console_log
assert "harry:" in console_log
diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
index 89b49576..b256624a 100644
--- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
+++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
@@ -12,7 +12,8 @@ import re
import pytest
-from tests.integration_tests.util import retry
+from tests.integration_tests.decorators import retry
+from tests.integration_tests.instances import IntegrationInstance
USER_DATA_SSH_AUTHKEY_DISABLE = """\
#cloud-config
@@ -50,3 +51,25 @@ class TestSshAuthkeyFingerprints:
assert re.search(r"256 SHA256:.*(ED25519)", syslog_output) is not None
assert re.search(r"1024 SHA256:.*(DSA)", syslog_output) is None
assert re.search(r"2048 SHA256:.*(RSA)", syslog_output) is None
+
+
+@pytest.mark.user_data(
+ """\
+#cloud-config
+users:
+ - default
+ - name: nch
+ no_create_home: true
+ - name: system
+ system: true
+"""
+)
+def test_no_home_directory_created(client: IntegrationInstance):
+ """Ensure cc_ssh_authkey_fingerprints doesn't create user directories"""
+ home_output = client.execute("ls /home")
+ assert "nch" not in home_output
+ assert "system" not in home_output
+
+ passwd = client.execute("cat /etc/passwd")
+ assert re.search("^nch:", passwd, re.MULTILINE)
+ assert re.search("^system:", passwd, re.MULTILINE)
diff --git a/tests/integration_tests/modules/test_write_files.py b/tests/integration_tests/modules/test_write_files.py
index 1eb7e945..a713b9c5 100644
--- a/tests/integration_tests/modules/test_write_files.py
+++ b/tests/integration_tests/modules/test_write_files.py
@@ -50,6 +50,7 @@ write_files:
defer: true
owner: 'myuser'
permissions: '0644'
+ append: true
""".format(
B64_CONTENT.decode("ascii")
)
@@ -91,3 +92,8 @@ class TestWriteFiles:
class_client.execute('stat -c "%U %a" /home/testuser/my-file')
== "myuser 644"
)
+        # Assert the write_files per-instance frequency is honored and the
+        # module runs only once. Given append: true, multiple runs across
+        # reboots would append duplicate content.
+ class_client.restart()
+ out = class_client.read_from_file("/home/testuser/my-file")
+ assert "echo 'hello world!'" == out
diff --git a/tests/integration_tests/network/test_net_config_load.py b/tests/integration_tests/network/test_net_config_load.py
deleted file mode 100644
index a6863b63..00000000
--- a/tests/integration_tests/network/test_net_config_load.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""Test loading the network config"""
-import pytest
-
-from tests.integration_tests.instances import IntegrationInstance
-
-
-def _customize_envionment(client: IntegrationInstance):
- # Insert our "disable_network_config" file here
- client.write_to_file(
- "/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg",
- "network: {config: disabled}\n",
- )
- client.execute("cloud-init clean --logs")
- client.restart()
-
-
-def test_network_disabled_via_etc_cloud(client: IntegrationInstance):
- """Test that network can be disabled via config file in /etc/cloud"""
- if client.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
- pytest.skip(
- "IN_PLACE not supported as we mount /etc/cloud contents into the "
- "container"
- )
- _customize_envionment(client)
-
- log = client.read_from_file("/var/log/cloud-init.log")
- assert "network config is disabled by system_cfg" in log
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index 31fe69c0..ec6b1434 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -1,12 +1,16 @@
-import functools
import logging
import multiprocessing
import os
+import re
import time
from collections import namedtuple
from contextlib import contextmanager
from pathlib import Path
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
log = logging.getLogger("integration_testing")
key_pair = namedtuple("key_pair", "public_key private_key")
@@ -23,8 +27,12 @@ def verify_ordered_items_in_text(to_verify: list, text: str):
"""
index = 0
for item in to_verify:
- index = text[index:].find(item)
- assert index > -1, "Expected item not found: '{}'".format(item)
+ try:
+ matched = re.search(item, text[index:])
+ except re.error:
+ matched = re.search(re.escape(item), text[index:])
+ assert matched, "Expected item not found: '{}'".format(item)
+        index += matched.start()
def verify_clean_log(log):
@@ -67,7 +75,10 @@ def verify_clean_log(log):
for traceback_text in traceback_texts:
expected_tracebacks += log.count(traceback_text)
- assert warning_count == expected_warnings
+ assert warning_count == expected_warnings, (
+ f"Unexpected warning count != {expected_warnings}. Found: "
+ f"{re.findall('WARNING.*', log)}"
+ )
assert traceback_count == expected_tracebacks
@@ -110,33 +121,11 @@ def get_test_rsa_keypair(key_name: str = "test1") -> key_pair:
return key_pair(public_key, private_key)
-def retry(*, tries: int = 30, delay: int = 1):
- """Decorator for retries.
-
- Retry a function until code no longer raises an exception or
- max tries is reached.
-
- Example:
- @retry(tries=5, delay=1)
- def try_something_that_may_not_be_ready():
- ...
- """
-
- def _retry(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- last_error = None
- for _ in range(tries):
- try:
- func(*args, **kwargs)
- break
- except Exception as e:
- last_error = e
- time.sleep(delay)
- else:
- if last_error:
- raise last_error
-
- return wrapper
-
- return _retry
+def get_console_log(client: IntegrationInstance):
+ try:
+ console_log = client.instance.console_log()
+ except NotImplementedError:
+ pytest.skip("NotImplementedError when requesting console log")
+ if console_log.lower().startswith("no console output"):
+ pytest.fail("no console output")
+ return console_log
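
For context, the rewritten verify_ordered_items_in_text treats each item
as a regular expression first and falls back to a literal match when the
pattern does not compile. A self-contained sketch of that behavior:

    import re

    def find_ordered(items, text):
        index = 0
        for item in items:
            try:
                matched = re.search(item, text[index:])
            except re.error:  # not a valid regex, e.g. "a[b"
                matched = re.search(re.escape(item), text[index:])
            assert matched, "Expected item not found: '{}'".format(item)
            index = matched.start()

    find_ordered(
        [r"Cloud-init v\. \S+", "running modules"],
        "Cloud-init v. 22.2 running 'init'\nrunning modules for final",
    )
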
diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py
index 657cb399..e69de29b 100644
--- a/tests/unittests/__init__.py
+++ b/tests/unittests/__init__.py
@@ -1,12 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-try:
- # For test cases, avoid the following UserWarning to stderr:
- # You don't have the C version of NameMapper installed ...
- from Cheetah import NameMapper as _nm
-
- _nm.C_VERSION = True
-except ImportError:
- pass
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/cmd/devel/test_render.py b/tests/unittests/cmd/devel/test_render.py
index 4afc64f0..0ef4f364 100644
--- a/tests/unittests/cmd/devel/test_render.py
+++ b/tests/unittests/cmd/devel/test_render.py
@@ -1,6 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import os
from collections import namedtuple
from io import StringIO
@@ -8,147 +7,137 @@ from cloudinit.cmd.devel import render
from cloudinit.helpers import Paths
from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.util import ensure_dir, write_file
-from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja
+from tests.unittests.helpers import mock, skipUnlessJinja
+M_PATH = "cloudinit.cmd.devel.render."
-class TestRender(CiTestCase):
- with_logs = True
+class TestRender:
- args = namedtuple("renderargs", "user_data instance_data debug")
+ Args = namedtuple("Args", "user_data instance_data debug")
- def setUp(self):
- super(TestRender, self).setUp()
- self.tmp = self.tmp_dir()
-
- def test_handle_args_error_on_missing_user_data(self):
+ def test_handle_args_error_on_missing_user_data(self, caplog, tmpdir):
"""When user_data file path does not exist, log an error."""
- absent_file = self.tmp_path("user-data", dir=self.tmp)
- instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ absent_file = tmpdir.join("user-data")
+ instance_data = tmpdir.join("instance-data")
write_file(instance_data, "{}")
- args = self.args(
+ args = self.Args(
user_data=absent_file, instance_data=instance_data, debug=False
)
with mock.patch("sys.stderr", new_callable=StringIO):
- self.assertEqual(1, render.handle_args("anyname", args))
- self.assertIn(
- "Missing user-data file: %s" % absent_file, self.logs.getvalue()
- )
+ assert render.handle_args("anyname", args) == 1
+ assert "Missing user-data file: %s" % absent_file in caplog.text
- def test_handle_args_error_on_missing_instance_data(self):
+ def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
"""When instance_data file path does not exist, log an error."""
- user_data = self.tmp_path("user-data", dir=self.tmp)
- absent_file = self.tmp_path("instance-data", dir=self.tmp)
- args = self.args(
+ user_data = tmpdir.join("user-data")
+ absent_file = tmpdir.join("instance-data")
+ args = self.Args(
user_data=user_data, instance_data=absent_file, debug=False
)
with mock.patch("sys.stderr", new_callable=StringIO):
- self.assertEqual(1, render.handle_args("anyname", args))
- self.assertIn(
- "Missing instance-data.json file: %s" % absent_file,
- self.logs.getvalue(),
+ assert render.handle_args("anyname", args) == 1
+ assert (
+ "Missing instance-data.json file: %s" % absent_file in caplog.text
)
- def test_handle_args_defaults_instance_data(self):
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_handle_args_defaults_instance_data(self, m_paths, caplog, tmpdir):
"""When no instance_data argument, default to configured run_dir."""
- user_data = self.tmp_path("user-data", dir=self.tmp)
- run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ user_data = tmpdir.join("user-data")
+ run_dir = tmpdir.join("run_dir")
ensure_dir(run_dir)
- paths = Paths({"run_dir": run_dir})
- self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
- self.m_paths.return_value = paths
- args = self.args(user_data=user_data, instance_data=None, debug=False)
+ m_paths.return_value = Paths({"run_dir": run_dir})
+ args = self.Args(user_data=user_data, instance_data=None, debug=False)
with mock.patch("sys.stderr", new_callable=StringIO):
- self.assertEqual(1, render.handle_args("anyname", args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- self.assertIn(
- "Missing instance-data.json file: %s" % json_file,
- self.logs.getvalue(),
- )
-
- def test_handle_args_root_fallback_from_sensitive_instance_data(self):
+ assert render.handle_args("anyname", args) == 1
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ msg = "Missing instance-data.json file: %s" % json_file
+ assert msg in caplog.text
+
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_handle_args_root_fallback_from_sensitive_instance_data(
+ self, m_paths, caplog, tmpdir
+ ):
"""When root user defaults to sensitive.json."""
- user_data = self.tmp_path("user-data", dir=self.tmp)
- run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ user_data = tmpdir.join("user-data")
+ run_dir = tmpdir.join("run_dir")
ensure_dir(run_dir)
- paths = Paths({"run_dir": run_dir})
- self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
- self.m_paths.return_value = paths
- args = self.args(user_data=user_data, instance_data=None, debug=False)
+ m_paths.return_value = Paths({"run_dir": run_dir})
+ args = self.Args(user_data=user_data, instance_data=None, debug=False)
with mock.patch("sys.stderr", new_callable=StringIO):
with mock.patch("os.getuid") as m_getuid:
m_getuid.return_value = 0
- self.assertEqual(1, render.handle_args("anyname", args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- self.assertIn(
- "WARNING: Missing root-readable %s. Using redacted %s"
- % (json_sensitive, json_file),
- self.logs.getvalue(),
- )
- self.assertIn(
- "ERROR: Missing instance-data.json file: %s" % json_file,
- self.logs.getvalue(),
+ assert render.handle_args("anyname", args) == 1
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ json_sensitive = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ assert (
+ "Missing root-readable %s. Using redacted %s"
+ % (json_sensitive, json_file)
+ in caplog.text
)
+ assert "Missing instance-data.json file: %s" % json_file in caplog.text
- def test_handle_args_root_uses_sensitive_instance_data(self):
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_handle_args_root_uses_sensitive_instance_data(
+ self, m_paths, tmpdir
+ ):
"""When root user, and no instance-data arg, use sensitive.json."""
- user_data = self.tmp_path("user-data", dir=self.tmp)
+ user_data = tmpdir.join("user-data")
write_file(user_data, "##template: jinja\nrendering: {{ my_var }}")
- run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ run_dir = tmpdir.join("run_dir")
ensure_dir(run_dir)
- json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ json_sensitive = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
write_file(json_sensitive, '{"my-var": "jinja worked"}')
- paths = Paths({"run_dir": run_dir})
- self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
- self.m_paths.return_value = paths
- args = self.args(user_data=user_data, instance_data=None, debug=False)
- with mock.patch("sys.stderr", new_callable=StringIO):
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- with mock.patch("os.getuid") as m_getuid:
- m_getuid.return_value = 0
- self.assertEqual(0, render.handle_args("anyname", args))
- self.assertIn("rendering: jinja worked", m_stdout.getvalue())
+ m_paths.return_value = Paths({"run_dir": run_dir})
+ args = self.Args(user_data=user_data, instance_data=None, debug=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ assert render.handle_args("anyname", args) == 0
+ assert "rendering: jinja worked" in m_stdout.getvalue()
@skipUnlessJinja()
- def test_handle_args_renders_instance_data_vars_in_template(self):
+ def test_handle_args_renders_instance_data_vars_in_template(
+ self, caplog, tmpdir
+ ):
"""If user_data file is a jinja template render instance-data vars."""
- user_data = self.tmp_path("user-data", dir=self.tmp)
+ user_data = tmpdir.join("user-data")
write_file(user_data, "##template: jinja\nrendering: {{ my_var }}")
- instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ instance_data = tmpdir.join("instance-data")
write_file(instance_data, '{"my-var": "jinja worked"}')
- args = self.args(
+ args = self.Args(
user_data=user_data, instance_data=instance_data, debug=True
)
- with mock.patch("sys.stderr", new_callable=StringIO) as m_console_err:
+ with mock.patch("sys.stderr", new_callable=StringIO):
with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- self.assertEqual(0, render.handle_args("anyname", args))
- self.assertIn(
- "DEBUG: Converted jinja variables\n{", self.logs.getvalue()
- )
- self.assertIn(
- "DEBUG: Converted jinja variables\n{", m_console_err.getvalue()
+ assert render.handle_args("anyname", args) == 0
+ # Make sure the log is correctly captured; the caplog fixture has
+ # an issue in pytest==4.6.9 (focal), so check the last record:
+ assert (
+ "Converted jinja variables\n{" in caplog.records[-1].getMessage()
)
- self.assertEqual("rendering: jinja worked", m_stdout.getvalue())
+ assert "rendering: jinja worked" == m_stdout.getvalue()
@skipUnlessJinja()
- def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self):
+ def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(
+ self, caplog, tmpdir
+ ):
"""If user_data file has invalid jinja operations log warnings."""
- user_data = self.tmp_path("user-data", dir=self.tmp)
+ user_data = tmpdir.join("user-data")
write_file(user_data, "##template: jinja\nrendering: {{ my-var }}")
- instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ instance_data = tmpdir.join("instance-data")
write_file(instance_data, '{"my-var": "jinja worked"}')
- args = self.args(
+ args = self.Args(
user_data=user_data, instance_data=instance_data, debug=True
)
with mock.patch("sys.stderr", new_callable=StringIO):
- self.assertEqual(1, render.handle_args("anyname", args))
- self.assertIn(
- "WARNING: Ignoring jinja template for %s: Undefined jinja"
+ assert render.handle_args("anyname", args) == 1
+ assert (
+ "Ignoring jinja template for %s: Undefined jinja"
' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
- ' "my_var"?' % user_data,
- self.logs.getvalue(),
- )
+ ' "my_var"?' % user_data
+ ) in caplog.text
# vi: ts=4 expandtab
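
The conversion above follows the pattern repeated throughout this import:
CiTestCase subclasses become plain pytest classes, self.tmp_dir() and
self.logs give way to the tmpdir and caplog fixtures, and patch targets
are built from an M_PATH module prefix. A minimal sketch of the shape,
with hypothetical names:

    import logging
    from unittest import mock

    M_PATH = "cloudinit.cmd.devel.render."  # dotted prefix for patch targets

    class TestShape:
        @mock.patch(M_PATH + "read_cfg_paths")
        def test_missing_file_is_logged(self, m_paths, caplog, tmpdir):
            absent = tmpdir.join("user-data")  # path object, never written
            logging.getLogger().error("Missing user-data file: %s", absent)
            assert "Missing user-data file" in caplog.text
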
diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py
index 3e778b0b..2f7a1fb1 100644
--- a/tests/unittests/cmd/test_main.py
+++ b/tests/unittests/cmd/test_main.py
@@ -56,50 +56,6 @@ class TestMain(FilesystemMockingTestCase):
self.stderr = StringIO()
self.patchStdoutAndStderr(stderr=self.stderr)
- def test_main_init_run_net_stops_on_file_no_net(self):
- """When no-net file is present, main_init does not process modules."""
- stop_file = os.path.join(self.cloud_dir, "data", "no-net") # stop file
- write_file(stop_file, "")
- cmdargs = myargs(
- debug=False,
- files=None,
- force=False,
- local=False,
- reporter=None,
- subcommand="init",
- )
- (_item1, item2) = wrap_and_call(
- "cloudinit.cmd.main",
- {
- "util.close_stdin": True,
- "netinfo.debug_info": "my net debug info",
- "util.fixup_output": ("outfmt", "errfmt"),
- },
- main.main_init,
- "init",
- cmdargs,
- )
- # We should not run write_files module
- self.assertFalse(
- os.path.exists(os.path.join(self.new_root, "etc/blah.ini")),
- "Unexpected run of write_files module produced blah.ini",
- )
- self.assertEqual([], item2)
- # Instancify is called
- instance_id_path = "var/lib/cloud/data/instance-id"
- self.assertFalse(
- os.path.exists(os.path.join(self.new_root, instance_id_path)),
- "Unexpected call to datasource.instancify produced instance-id",
- )
- expected_logs = [
- "Exiting. stop file ['{stop_file}'] existed\n".format(
- stop_file=stop_file
- ),
- "my net debug info", # netinfo.debug_info
- ]
- for log in expected_logs:
- self.assertIn(log, self.stderr.getvalue())
-
def test_main_init_run_net_runs_modules(self):
"""Modules like write_files are run in 'net' mode."""
cmdargs = myargs(
@@ -137,7 +93,6 @@ class TestMain(FilesystemMockingTestCase):
expected_logs = [
"network config is disabled by fallback", # apply_network_config
"my net debug info", # netinfo.debug_info
- "no previous run detected",
]
for log in expected_logs:
self.assertIn(log, self.stderr.getvalue())
@@ -209,7 +164,6 @@ class TestMain(FilesystemMockingTestCase):
expected_logs = [
"network config is disabled by fallback", # apply_network_config
"my net debug info", # netinfo.debug_info
- "no previous run detected",
]
for log in expected_logs:
self.assertIn(log, self.stderr.getvalue())
diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py
index 03a73bb5..207078fa 100644
--- a/tests/unittests/cmd/test_query.py
+++ b/tests/unittests/cmd/test_query.py
@@ -20,6 +20,8 @@ from cloudinit.sources import (
from cloudinit.util import b64e, write_file
from tests.unittests.helpers import mock
+M_PATH = "cloudinit.cmd.query."
+
def _gzip_data(data):
with BytesIO() as iobuf:
@@ -28,11 +30,11 @@ def _gzip_data(data):
return iobuf.getvalue()
-@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "")
+@mock.patch(M_PATH + "addLogHandlerCLI", lambda *args: "")
class TestQuery:
- args = namedtuple(
- "queryargs",
+ Args = namedtuple(
+ "Args",
"debug dump_all format instance_data list_keys user_data vendor_data"
" varname",
)
@@ -70,7 +72,7 @@ class TestQuery:
def test_handle_args_error_on_missing_param(self, caplog, capsys):
"""Error when missing required parameters and print usage."""
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=False,
format=None,
@@ -81,7 +83,7 @@ class TestQuery:
varname=None,
)
with mock.patch(
- "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
+ M_PATH + "addLogHandlerCLI", return_value=""
) as m_cli_log:
assert 1 == query.handle_args("anyname", args)
expected_error = (
@@ -108,13 +110,13 @@ class TestQuery:
),
),
)
- def test_handle_args_error_on_invalid_vaname_paths(
+ def test_handle_args_error_on_invalid_varname_paths(
self, inst_data, varname, expected_error, caplog, tmpdir
):
"""Error when varname is not a valid instance-data variable path."""
instance_data = tmpdir.join("instance-data")
instance_data.write(inst_data)
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=False,
format=None,
@@ -125,12 +127,10 @@ class TestQuery:
varname=varname,
)
paths, _, _, _ = self._setup_paths(tmpdir)
- with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ with mock.patch(M_PATH + "read_cfg_paths") as m_paths:
m_paths.return_value = paths
- with mock.patch(
- "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
- ):
- with mock.patch("cloudinit.cmd.query.load_userdata") as m_lud:
+ with mock.patch(M_PATH + "addLogHandlerCLI", return_value=""):
+ with mock.patch(M_PATH + "load_userdata") as m_lud:
m_lud.return_value = "ud"
assert 1 == query.handle_args("anyname", args)
assert expected_error in caplog.text
@@ -138,7 +138,7 @@ class TestQuery:
def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
"""When instance_data file path does not exist, log an error."""
absent_fn = tmpdir.join("absent")
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -159,7 +159,7 @@ class TestQuery:
"""When instance_data file is unreadable, log an error."""
noread_fn = tmpdir.join("unreadable")
noread_fn.write("thou shall not pass")
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -169,15 +169,47 @@ class TestQuery:
vendor_data="vd",
varname=None,
)
- with mock.patch("cloudinit.cmd.query.util.load_file") as m_load:
+ with mock.patch(M_PATH + "util.load_file") as m_load:
m_load.side_effect = OSError(errno.EACCES, "Not allowed")
assert 1 == query.handle_args("anyname", args)
msg = "No read permission on '%s'. Try sudo" % noread_fn
assert msg in caplog.text
+ @pytest.mark.parametrize(
+ "exception",
+ [
+ (OSError(errno.EACCES, "Not allowed"),),
+ (OSError(errno.ENOENT, "Not allowed"),),
+ (IOError,),
+ ],
+ )
+ def test_handle_args_error_when_no_read_permission_init_cfg(
+ self, exception, capsys
+ ):
+ """query.handle_status_args exists with 1 and no sys-output."""
+ args = self.Args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ with mock.patch(
+ M_PATH + "read_cfg_paths",
+ side_effect=exception,
+ ) as m_read_cfg_paths:
+ query.handle_args("anyname", args)
+ assert m_read_cfg_paths.call_count == 1
+ out, err = capsys.readouterr()
+ assert not out
+ assert not err
+
def test_handle_args_defaults_instance_data(self, caplog, tmpdir):
"""When no instance_data argument, default to configured run_dir."""
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -188,7 +220,7 @@ class TestQuery:
varname=None,
)
paths, run_dir, _, _ = self._setup_paths(tmpdir)
- with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ with mock.patch(M_PATH + "read_cfg_paths") as m_paths:
m_paths.return_value = paths
assert 1 == query.handle_args("anyname", args)
json_file = run_dir.join(INSTANCE_JSON_FILE)
@@ -197,7 +229,7 @@ class TestQuery:
def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
"""When no instance_data argument, root falls back to redacted json."""
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -208,7 +240,7 @@ class TestQuery:
varname=None,
)
paths, run_dir, _, _ = self._setup_paths(tmpdir)
- with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ with mock.patch(M_PATH + "read_cfg_paths") as m_paths:
m_paths.return_value = paths
with mock.patch("os.getuid") as m_getuid:
m_getuid.return_value = 0
@@ -239,7 +271,7 @@ class TestQuery:
)
sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
sensitive_file.write('{"my-var": "it worked"}')
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -249,7 +281,7 @@ class TestQuery:
vendor_data=vendor_data.strpath,
varname=None,
)
- with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ with mock.patch(M_PATH + "read_cfg_paths") as m_paths:
m_paths.return_value = paths
with mock.patch("os.getuid") as m_getuid:
m_getuid.return_value = 0
@@ -277,7 +309,7 @@ class TestQuery:
vd_path = os.path.join(paths.instance_link, "vendor-data.txt")
write_file(vd_path, "instance_link_vd")
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -287,7 +319,7 @@ class TestQuery:
vendor_data=None,
varname=None,
)
- with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ with mock.patch(M_PATH + "read_cfg_paths") as m_paths:
m_paths.return_value = paths
with mock.patch("os.getuid", return_value=0):
assert 0 == query.handle_args("anyname", args)
@@ -308,7 +340,7 @@ class TestQuery:
)
sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
sensitive_file.write('{"my-var": "it worked"}')
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -318,7 +350,7 @@ class TestQuery:
vendor_data=vendor_data.strpath,
varname=None,
)
- with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ with mock.patch(M_PATH + "read_cfg_paths") as m_paths:
m_paths.return_value = paths
with mock.patch("os.getuid") as m_getuid:
m_getuid.return_value = 0
@@ -334,7 +366,7 @@ class TestQuery:
"""When --all is specified query will dump all instance data vars."""
instance_data = tmpdir.join("instance-data")
instance_data.write('{"my-var": "it worked"}')
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -359,7 +391,7 @@ class TestQuery:
"""When the argument varname is passed, report its value."""
instance_data = tmpdir.join("instance-data")
instance_data.write('{"my-var": "it worked"}')
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -398,7 +430,7 @@ class TestQuery:
"""If user_data file is a jinja template render instance-data vars."""
instance_data = tmpdir.join("instance-data")
instance_data.write(inst_data)
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=False,
format=None,
@@ -440,7 +472,7 @@ class TestQuery:
}
"""
)
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=True,
format=None,
@@ -466,7 +498,7 @@ class TestQuery:
' "top": "gun"}'
)
expected = "top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n"
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=False,
format=None,
@@ -492,7 +524,7 @@ class TestQuery:
+ ' {"v2_2": "val2.2"}, "top": "gun"}'
)
expected = "v1_1\nv1_2\n"
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=False,
format=None,
@@ -518,7 +550,7 @@ class TestQuery:
+ '{"v2_2": "val2.2"}, "top": "gun"}'
)
expected_error = "--list-keys provided but 'top' is not a dict"
- args = self.args(
+ args = self.Args(
debug=False,
dump_all=False,
format=None,
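
One subtlety in the new permission-error test above: each parametrized
exception is wrapped in a one-element tuple because mock treats an
iterable side_effect as a sequence to consume, raising any exception
instances it yields. A quick illustration:

    import errno
    from unittest import mock

    m = mock.Mock(side_effect=(OSError(errno.EACCES, "Not allowed"),))
    try:
        m()  # the first (and only) call raises the queued exception
    except OSError as e:
        assert e.errno == errno.EACCES
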
diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py
index c5f424da..e9169a55 100644
--- a/tests/unittests/cmd/test_status.py
+++ b/tests/unittests/cmd/test_status.py
@@ -4,188 +4,189 @@ import os
from collections import namedtuple
from io import StringIO
from textwrap import dedent
+from typing import Callable, Dict, Optional, Union
+from unittest import mock
+
+import pytest
from cloudinit.atomic_helper import write_json
from cloudinit.cmd import status
from cloudinit.util import ensure_file
-from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
-
-mypaths = namedtuple("MyPaths", "run_dir")
-myargs = namedtuple("MyArgs", "long wait")
-
-
-class TestStatus(CiTestCase):
- def setUp(self):
- super(TestStatus, self).setUp()
- self.new_root = self.tmp_dir()
- self.status_file = self.tmp_path("status.json", self.new_root)
- self.disable_file = self.tmp_path("cloudinit-disable", self.new_root)
- self.paths = mypaths(run_dir=self.new_root)
-
- class FakeInit(object):
- paths = self.paths
-
- def __init__(self, ds_deps):
- pass
-
- def read_cfg(self):
- pass
-
- self.init_class = FakeInit
-
- def test__is_cloudinit_disabled_false_on_sysvinit(self):
- """When not in an environment using systemd, return False."""
- ensure_file(self.disable_file) # Create the ignored disable file
+from tests.unittests.helpers import wrap_and_call
+
+M_NAME = "cloudinit.cmd.status"
+M_PATH = f"{M_NAME}."
+
+MyPaths = namedtuple("MyPaths", "run_dir")
+MyArgs = namedtuple("MyArgs", "long wait")
+Config = namedtuple(
+ "Config", "new_root, status_file, disable_file, result_file, paths"
+)
+
+
+@pytest.fixture(scope="function")
+def config(tmpdir):
+ return Config(
+ new_root=tmpdir,
+ status_file=tmpdir.join("status.json"),
+ disable_file=tmpdir.join("cloudinit-disable"),
+ result_file=tmpdir.join("result.json"),
+ paths=MyPaths(run_dir=tmpdir),
+ )
+
+
+class TestStatus:
+ @pytest.mark.parametrize(
+ [
+ "ensured_file",
+ "uses_systemd",
+ "get_cmdline",
+ "expected_is_disabled",
+ "is_disabled_msg",
+ "expected_reason",
+ ],
+ [
+ # When not in an environment using systemd, return False.
+ pytest.param(
+ lambda config: config.disable_file,
+ False,
+ "root=/dev/my-root not-important",
+ False,
+ "expected enabled cloud-init on sysvinit",
+ "Cloud-init enabled on sysvinit",
+ id="false_on_sysvinit",
+ ),
+ # When using systemd and disable_file is present return disabled.
+ pytest.param(
+ lambda config: config.disable_file,
+ True,
+ "root=/dev/my-root not-important",
+ True,
+ "expected disabled cloud-init",
+ lambda config: f"Cloud-init disabled by {config.disable_file}",
+ id="true_on_disable_file",
+ ),
+ # Not disabled when using systemd and enabled via command line.
+ pytest.param(
+ lambda config: config.disable_file,
+ True,
+ "something cloud-init=enabled else",
+ False,
+ "expected enabled cloud-init",
+ "Cloud-init enabled by kernel command line cloud-init=enabled",
+ id="false_on_kernel_cmdline_enable",
+ ),
+ # When kernel command line disables cloud-init return True.
+ pytest.param(
+ None,
+ True,
+ "something cloud-init=disabled else",
+ True,
+ "expected disabled cloud-init",
+ "Cloud-init disabled by kernel parameter cloud-init=disabled",
+ id="true_on_kernel_cmdline",
+ ),
+ # When cloud-init-generator writes disabled file return True.
+ pytest.param(
+ lambda config: os.path.join(config.paths.run_dir, "disabled"),
+ True,
+ "something",
+ True,
+ "expected disabled cloud-init",
+ "Cloud-init disabled by cloud-init-generator",
+ id="true_when_generator_disables",
+ ),
+ # Report enabled when systemd generator creates the enabled file.
+ pytest.param(
+ lambda config: os.path.join(config.paths.run_dir, "enabled"),
+ True,
+ "something ignored",
+ False,
+ "expected enabled cloud-init",
+ "Cloud-init enabled by systemd cloud-init-generator",
+ id="false_when_enabled_in_systemd",
+ ),
+ ],
+ )
+ def test__is_cloudinit_disabled(
+ self,
+ ensured_file: Optional[Callable],
+ uses_systemd: bool,
+ get_cmdline: str,
+ expected_is_disabled: bool,
+ is_disabled_msg: str,
+ expected_reason: Union[str, Callable],
+ config: Config,
+ ):
+ if ensured_file is not None:
+ ensure_file(ensured_file(config))
(is_disabled, reason) = wrap_and_call(
- "cloudinit.cmd.status",
+ M_NAME,
{
- "uses_systemd": False,
- "get_cmdline": "root=/dev/my-root not-important",
+ "uses_systemd": uses_systemd,
+ "get_cmdline": get_cmdline,
},
status._is_cloudinit_disabled,
- self.disable_file,
- self.paths,
- )
- self.assertFalse(
- is_disabled, "expected enabled cloud-init on sysvinit"
+ config.disable_file,
+ config.paths,
)
- self.assertEqual("Cloud-init enabled on sysvinit", reason)
-
- def test__is_cloudinit_disabled_true_on_disable_file(self):
- """When using systemd and disable_file is present return disabled."""
- ensure_file(self.disable_file) # Create observed disable file
- (is_disabled, reason) = wrap_and_call(
- "cloudinit.cmd.status",
- {
- "uses_systemd": True,
- "get_cmdline": "root=/dev/my-root not-important",
- },
- status._is_cloudinit_disabled,
- self.disable_file,
- self.paths,
- )
- self.assertTrue(is_disabled, "expected disabled cloud-init")
- self.assertEqual(
- "Cloud-init disabled by {0}".format(self.disable_file), reason
- )
-
- def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
- """Not disabled when using systemd and enabled via commandline."""
- ensure_file(self.disable_file) # Create ignored disable file
- (is_disabled, reason) = wrap_and_call(
- "cloudinit.cmd.status",
- {
- "uses_systemd": True,
- "get_cmdline": "something cloud-init=enabled else",
- },
- status._is_cloudinit_disabled,
- self.disable_file,
- self.paths,
- )
- self.assertFalse(is_disabled, "expected enabled cloud-init")
- self.assertEqual(
- "Cloud-init enabled by kernel command line cloud-init=enabled",
- reason,
- )
-
- def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
- """When kernel command line disables cloud-init return True."""
- (is_disabled, reason) = wrap_and_call(
- "cloudinit.cmd.status",
- {
- "uses_systemd": True,
- "get_cmdline": "something cloud-init=disabled else",
- },
- status._is_cloudinit_disabled,
- self.disable_file,
- self.paths,
- )
- self.assertTrue(is_disabled, "expected disabled cloud-init")
- self.assertEqual(
- "Cloud-init disabled by kernel parameter cloud-init=disabled",
- reason,
- )
-
- def test__is_cloudinit_disabled_true_when_generator_disables(self):
- """When cloud-init-generator writes disabled file return True."""
- disabled_file = os.path.join(self.paths.run_dir, "disabled")
- ensure_file(disabled_file)
- (is_disabled, reason) = wrap_and_call(
- "cloudinit.cmd.status",
- {"uses_systemd": True, "get_cmdline": "something"},
- status._is_cloudinit_disabled,
- self.disable_file,
- self.paths,
- )
- self.assertTrue(is_disabled, "expected disabled cloud-init")
- self.assertEqual("Cloud-init disabled by cloud-init-generator", reason)
-
- def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
- """Report enabled when systemd generator creates the enabled file."""
- enabled_file = os.path.join(self.paths.run_dir, "enabled")
- ensure_file(enabled_file)
- (is_disabled, reason) = wrap_and_call(
- "cloudinit.cmd.status",
- {"uses_systemd": True, "get_cmdline": "something ignored"},
- status._is_cloudinit_disabled,
- self.disable_file,
- self.paths,
- )
- self.assertFalse(is_disabled, "expected enabled cloud-init")
- self.assertEqual(
- "Cloud-init enabled by systemd cloud-init-generator", reason
- )
-
- def test_status_returns_not_run(self):
+ assert is_disabled == expected_is_disabled, is_disabled_msg
+ if isinstance(expected_reason, str):
+ assert reason == expected_reason
+ else:
+ assert reason == expected_reason(config)
+
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_status_returns_not_run(self, m_read_cfg_paths, config: Config):
"""When status.json does not exist yet, return 'not run'."""
- self.assertFalse(
- os.path.exists(self.status_file), "Unexpected status.json found"
- )
- cmdargs = myargs(long=False, wait=False)
+ m_read_cfg_paths.return_value = config.paths
+ assert not os.path.exists(
+ config.status_file
+ ), "Unexpected status.json found"
+ cmdargs = MyArgs(long=False, wait=False)
with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
retcode = wrap_and_call(
- "cloudinit.cmd.status",
- {
- "_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
- },
+ M_NAME,
+ {"_is_cloudinit_disabled": (False, "")},
status.handle_status_args,
"ignored",
cmdargs,
)
- self.assertEqual(0, retcode)
- self.assertEqual("status: not run\n", m_stdout.getvalue())
+ assert retcode == 0
+ assert m_stdout.getvalue() == "status: not run\n"
- def test_status_returns_disabled_long_on_presence_of_disable_file(self):
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_status_returns_disabled_long_on_presence_of_disable_file(
+ self, m_read_cfg_paths, config: Config
+ ):
"""When cloudinit is disabled, return disabled reason."""
-
+ m_read_cfg_paths.return_value = config.paths
checked_files = []
def fakeexists(filepath):
checked_files.append(filepath)
- status_file = os.path.join(self.paths.run_dir, "status.json")
+ status_file = os.path.join(config.paths.run_dir, "status.json")
return bool(not filepath == status_file)
- cmdargs = myargs(long=True, wait=False)
+ cmdargs = MyArgs(long=True, wait=False)
with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
retcode = wrap_and_call(
- "cloudinit.cmd.status",
+ M_NAME,
{
"os.path.exists": {"side_effect": fakeexists},
"_is_cloudinit_disabled": (
True,
"disabled for some reason",
),
- "Init": {"side_effect": self.init_class},
},
status.handle_status_args,
"ignored",
cmdargs,
)
- self.assertEqual(0, retcode)
- self.assertEqual(
- [os.path.join(self.paths.run_dir, "status.json")], checked_files
- )
+ assert retcode == 0
+ assert checked_files == [
+ os.path.join(config.paths.run_dir, "status.json")
+ ]
expected = dedent(
"""\
status: disabled
@@ -193,246 +194,217 @@ class TestStatus(CiTestCase):
disabled for some reason
"""
)
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_returns_running_on_no_results_json(self):
- """Report running when status.json exists but result.json does not."""
- result_file = self.tmp_path("result.json", self.new_root)
- write_json(self.status_file, {})
- self.assertFalse(
- os.path.exists(result_file), "Unexpected result.json found"
- )
- cmdargs = myargs(long=False, wait=False)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- "cloudinit.cmd.status",
- {
- "_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
- },
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
- self.assertEqual(0, retcode)
- self.assertEqual("status: running\n", m_stdout.getvalue())
-
- def test_status_returns_running(self):
- """Report running when status exists with an unfinished stage."""
- ensure_file(self.tmp_path("result.json", self.new_root))
- write_json(
- self.status_file, {"v1": {"init": {"start": 1, "finished": None}}}
- )
- cmdargs = myargs(long=False, wait=False)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- "cloudinit.cmd.status",
+ assert m_stdout.getvalue() == expected
+
+ @pytest.mark.parametrize(
+ [
+ "ensured_file",
+ "status_content",
+ "assert_file",
+ "cmdargs",
+ "expected_retcode",
+ "expected_status",
+ ],
+ [
+ # Report running when status.json exists but result.json does not.
+ pytest.param(
+ None,
+ {},
+ lambda config: config.result_file,
+ MyArgs(long=False, wait=False),
+ 0,
+ "status: running\n",
+ id="running_on_no_results_json",
+ ),
+ # Report running when status exists with an unfinished stage.
+ pytest.param(
+ lambda config: config.result_file,
+ {"v1": {"init": {"start": 1, "finished": None}}},
+ None,
+ MyArgs(long=False, wait=False),
+ 0,
+ "status: running\n",
+ id="running",
+ ),
+ # Report done when result.json exists and no stages are unfinished.
+ pytest.param(
+ lambda config: config.result_file,
{
- "_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
+ "v1": {
+ "stage": None, # No current stage running
+ "datasource": (
+ "DataSourceNoCloud "
+ "[seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "blah": {"finished": 123.456},
+ "init": {
+ "errors": [],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
},
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
- self.assertEqual(0, retcode)
- self.assertEqual("status: running\n", m_stdout.getvalue())
-
- def test_status_returns_done(self):
- """Report done results.json exists no stages are unfinished."""
- ensure_file(self.tmp_path("result.json", self.new_root))
- write_json(
- self.status_file,
- {
- "v1": {
- "stage": None, # No current stage running
- "datasource": (
- "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
- "[dsmode=net]"
- ),
- "blah": {"finished": 123.456},
- "init": {
- "errors": [],
- "start": 124.567,
- "finished": 125.678,
- },
- "init-local": {"start": 123.45, "finished": 123.46},
- }
- },
- )
- cmdargs = myargs(long=False, wait=False)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- "cloudinit.cmd.status",
+ None,
+ MyArgs(long=False, wait=False),
+ 0,
+ "status: done\n",
+ id="done",
+ ),
+ # Long format of done status includes datasource info.
+ pytest.param(
+ lambda config: config.result_file,
{
- "_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
+ "v1": {
+ "stage": None,
+ "datasource": (
+ "DataSourceNoCloud "
+ "[seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "init": {"start": 124.567, "finished": 125.678},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
},
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
- self.assertEqual(0, retcode)
- self.assertEqual("status: done\n", m_stdout.getvalue())
-
- def test_status_returns_done_long(self):
- """Long format of done status includes datasource info."""
- ensure_file(self.tmp_path("result.json", self.new_root))
- write_json(
- self.status_file,
- {
- "v1": {
- "stage": None,
- "datasource": (
- "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
- "[dsmode=net]"
- ),
- "init": {"start": 124.567, "finished": 125.678},
- "init-local": {"start": 123.45, "finished": 123.46},
- }
- },
- )
- cmdargs = myargs(long=True, wait=False)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- "cloudinit.cmd.status",
+ None,
+ MyArgs(long=True, wait=False),
+ 0,
+ dedent(
+ """\
+ status: done
+ time: Thu, 01 Jan 1970 00:02:05 +0000
+ detail:
+ DataSourceNoCloud [seed=/var/.../seed/nocloud-net]\
+[dsmode=net]
+ """
+ ),
+ id="returns_done_long",
+ ),
+ # Reports error when any stage has errors.
+ pytest.param(
+ None,
{
- "_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
+ "v1": {
+ "stage": None,
+ "blah": {"errors": [], "finished": 123.456},
+ "init": {
+ "errors": ["error1"],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
},
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
- self.assertEqual(0, retcode)
- expected = dedent(
- """\
- status: done
- time: Thu, 01 Jan 1970 00:02:05 +0000
- detail:
- DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
- """
- )
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_on_errors(self):
- """Reports error when any stage has errors."""
- write_json(
- self.status_file,
- {
- "v1": {
- "stage": None,
- "blah": {"errors": [], "finished": 123.456},
- "init": {
- "errors": ["error1"],
- "start": 124.567,
- "finished": 125.678,
- },
- "init-local": {"start": 123.45, "finished": 123.46},
- }
- },
- )
- cmdargs = myargs(long=False, wait=False)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- "cloudinit.cmd.status",
+ None,
+ MyArgs(long=False, wait=False),
+ 1,
+ "status: error\n",
+ id="on_errors",
+ ),
+ # Long format of error status includes all error messages.
+ pytest.param(
+ None,
{
- "_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
+ "v1": {
+ "stage": None,
+ "datasource": (
+ "DataSourceNoCloud "
+ "[seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "init": {
+ "errors": ["error1"],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {
+ "errors": ["error2", "error3"],
+ "start": 123.45,
+ "finished": 123.46,
+ },
+ }
},
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
- self.assertEqual(1, retcode)
- self.assertEqual("status: error\n", m_stdout.getvalue())
-
- def test_status_on_errors_long(self):
- """Long format of error status includes all error messages."""
- write_json(
- self.status_file,
- {
- "v1": {
- "stage": None,
- "datasource": (
- "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
- "[dsmode=net]"
- ),
- "init": {
- "errors": ["error1"],
- "start": 124.567,
- "finished": 125.678,
- },
- "init-local": {
- "errors": ["error2", "error3"],
- "start": 123.45,
- "finished": 123.46,
- },
- }
- },
- )
- cmdargs = myargs(long=True, wait=False)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- "cloudinit.cmd.status",
+ None,
+ MyArgs(long=True, wait=False),
+ 1,
+ dedent(
+ """\
+ status: error
+ time: Thu, 01 Jan 1970 00:02:05 +0000
+ detail:
+ error1
+ error2
+ error3
+ """
+ ),
+ id="on_errors_long",
+ ),
+ # Long format reports the stage in which we are running.
+ pytest.param(
+ None,
{
- "_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
},
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
- self.assertEqual(1, retcode)
- expected = dedent(
- """\
- status: error
- time: Thu, 01 Jan 1970 00:02:05 +0000
- detail:
- error1
- error2
- error3
- """
- )
- self.assertEqual(expected, m_stdout.getvalue())
-
- def test_status_returns_running_long_format(self):
- """Long format reports the stage in which we are running."""
+ None,
+ MyArgs(long=True, wait=False),
+ 0,
+ dedent(
+ """\
+ status: running
+ time: Thu, 01 Jan 1970 00:02:04 +0000
+ detail:
+ Running in stage: init
+ """
+ ),
+ id="running_long_format",
+ ),
+ ],
+ )
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_status_output(
+ self,
+ m_read_cfg_paths,
+ ensured_file: Optional[Callable],
+ status_content: Dict,
+ assert_file,
+ cmdargs: MyArgs,
+ expected_retcode: int,
+ expected_status: str,
+ config: Config,
+ ):
+ m_read_cfg_paths.return_value = config.paths
+ if ensured_file:
+ ensure_file(ensured_file(config))
write_json(
- self.status_file,
- {
- "v1": {
- "stage": "init",
- "init": {"start": 124.456, "finished": None},
- "init-local": {"start": 123.45, "finished": 123.46},
- }
- },
+ config.status_file,
+ status_content,
)
- cmdargs = myargs(long=True, wait=False)
+ if assert_file:
+ assert not os.path.exists(
+ config.result_file
+ ), f"Unexpected {config.result_file} found"
with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
retcode = wrap_and_call(
- "cloudinit.cmd.status",
- {
- "_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
- },
+ M_NAME,
+ {"_is_cloudinit_disabled": (False, "")},
status.handle_status_args,
"ignored",
cmdargs,
)
- self.assertEqual(0, retcode)
- expected = dedent(
- """\
- status: running
- time: Thu, 01 Jan 1970 00:02:04 +0000
- detail:
- Running in stage: init
- """
- )
- self.assertEqual(expected, m_stdout.getvalue())
+ assert retcode == expected_retcode
+ assert m_stdout.getvalue() == expected_status
- def test_status_wait_blocks_until_done(self):
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_status_wait_blocks_until_done(
+ self, m_read_cfg_paths, config: Config
+ ):
"""Specifying wait will poll every 1/4 second until done state."""
+ m_read_cfg_paths.return_value = config.paths
running_json = {
"v1": {
"stage": "init",
@@ -448,37 +420,41 @@ class TestStatus(CiTestCase):
}
}
- self.sleep_calls = 0
+ sleep_calls = 0
def fake_sleep(interval):
- self.assertEqual(0.25, interval)
- self.sleep_calls += 1
- if self.sleep_calls == 2:
- write_json(self.status_file, running_json)
- elif self.sleep_calls == 3:
- write_json(self.status_file, done_json)
- result_file = self.tmp_path("result.json", self.new_root)
+ nonlocal sleep_calls
+ assert interval == 0.25
+ sleep_calls += 1
+ if sleep_calls == 2:
+ write_json(config.status_file, running_json)
+ elif sleep_calls == 3:
+ write_json(config.status_file, done_json)
+ result_file = config.result_file
ensure_file(result_file)
- cmdargs = myargs(long=False, wait=True)
+ cmdargs = MyArgs(long=False, wait=True)
with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
retcode = wrap_and_call(
- "cloudinit.cmd.status",
+ M_NAME,
{
"sleep": {"side_effect": fake_sleep},
"_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
},
status.handle_status_args,
"ignored",
cmdargs,
)
- self.assertEqual(0, retcode)
- self.assertEqual(4, self.sleep_calls)
- self.assertEqual("....\nstatus: done\n", m_stdout.getvalue())
-
- def test_status_wait_blocks_until_error(self):
+ assert retcode == 0
+ assert sleep_calls == 4
+ assert m_stdout.getvalue() == "....\nstatus: done\n"
+
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_status_wait_blocks_until_error(
+ self, m_read_cfg_paths, config: Config
+ ):
"""Specifying wait will poll every 1/4 second until error state."""
+ m_read_cfg_paths.return_value = config.paths
running_json = {
"v1": {
"stage": "init",
@@ -498,51 +474,53 @@ class TestStatus(CiTestCase):
}
}
- self.sleep_calls = 0
+ sleep_calls = 0
def fake_sleep(interval):
- self.assertEqual(0.25, interval)
- self.sleep_calls += 1
- if self.sleep_calls == 2:
- write_json(self.status_file, running_json)
- elif self.sleep_calls == 3:
- write_json(self.status_file, error_json)
-
- cmdargs = myargs(long=False, wait=True)
+ nonlocal sleep_calls
+ assert interval == 0.25
+ sleep_calls += 1
+ if sleep_calls == 2:
+ write_json(config.status_file, running_json)
+ elif sleep_calls == 3:
+ write_json(config.status_file, error_json)
+
+ cmdargs = MyArgs(long=False, wait=True)
with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
retcode = wrap_and_call(
- "cloudinit.cmd.status",
+ M_NAME,
{
"sleep": {"side_effect": fake_sleep},
"_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
},
status.handle_status_args,
"ignored",
cmdargs,
)
- self.assertEqual(1, retcode)
- self.assertEqual(4, self.sleep_calls)
- self.assertEqual("....\nstatus: error\n", m_stdout.getvalue())
+ assert retcode == 1
+ assert sleep_calls == 4
+ assert m_stdout.getvalue() == "....\nstatus: error\n"
- def test_status_main(self):
+ @mock.patch(M_PATH + "read_cfg_paths")
+ def test_status_main(self, m_read_cfg_paths, config: Config):
"""status.main can be run as a standalone script."""
+ m_read_cfg_paths.return_value = config.paths
write_json(
- self.status_file, {"v1": {"init": {"start": 1, "finished": None}}}
+ config.status_file,
+ {"v1": {"init": {"start": 1, "finished": None}}},
)
- with self.assertRaises(SystemExit) as context_manager:
+ with pytest.raises(SystemExit) as e:
with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
wrap_and_call(
- "cloudinit.cmd.status",
+ M_NAME,
{
"sys.argv": {"new": ["status"]},
"_is_cloudinit_disabled": (False, ""),
- "Init": {"side_effect": self.init_class},
},
status.main,
)
- self.assertEqual(0, context_manager.exception.code)
- self.assertEqual("status: running\n", m_stdout.getvalue())
+ assert e.value.code == 0
+ assert m_stdout.getvalue() == "status: running\n"
# vi: ts=4 expandtab syntax=python
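
The status tests pass lambdas through pytest.mark.parametrize because
parameters are evaluated at collection time, before fixtures exist;
wrapping them in callables defers path construction until the per-test
config fixture is available. A standalone sketch of the pattern:

    import pytest

    @pytest.fixture
    def config(tmpdir):
        return {"disable_file": tmpdir.join("cloudinit-disable")}

    @pytest.mark.parametrize(
        "get_path",
        [pytest.param(lambda cfg: cfg["disable_file"], id="disable_file")],
    )
    def test_deferred_params(get_path, config):
        path = get_path(config)  # resolved at run time, with the fixture
        assert str(path).endswith("cloudinit-disable")
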
diff --git a/tests/unittests/config/test_apt_key.py b/tests/unittests/config/test_apt_key.py
index 9fcf3039..bbffe7d2 100644
--- a/tests/unittests/config/test_apt_key.py
+++ b/tests/unittests/config/test_apt_key.py
@@ -3,6 +3,7 @@ from unittest import mock
from cloudinit import subp, util
from cloudinit.config import cc_apt_configure
+from cloudinit.subp import SubpResult
TEST_KEY_HUMAN = """
/etc/apt/cloud-init.gpg.d/my_key.gpg
@@ -38,7 +39,7 @@ class TestAptKey:
Class to test apt-key commands
"""
- @mock.patch.object(subp, "subp", return_value=("fakekey", ""))
+ @mock.patch.object(subp, "subp", return_value=SubpResult("fakekey", ""))
@mock.patch.object(util, "write_file")
def _apt_key_add_success_helper(self, directory, *args, hardened=False):
file = cc_apt_configure.apt_key(
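
SubpResult is the named-tuple return type that subp.subp now uses in
place of a bare (stdout, stderr) pair, so mocks must return it for both
attribute access and tuple unpacking to keep working. A sketch, assuming
the (stdout, stderr) field layout:

    from collections import namedtuple

    # Stand-in mirroring the assumed shape of cloudinit.subp.SubpResult.
    SubpResult = namedtuple("SubpResult", "stdout stderr")

    result = SubpResult("fakekey", "")
    assert result.stdout == "fakekey" and result.stderr == ""
    out, err = result  # named tuples still unpack like plain tuples
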
diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py
index fbc2bf45..371963b1 100644
--- a/tests/unittests/config/test_apt_source_v1.py
+++ b/tests/unittests/config/test_apt_source_v1.py
@@ -663,7 +663,12 @@ class TestAptSourceConfig(TestCase):
with mock.patch.object(subp, "subp") as mockobj:
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
mockobj.assert_called_once_with(
- ["add-apt-repository", "ppa:smoser/cloud-init-test"], target=None
+ [
+ "add-apt-repository",
+ "--no-update",
+ "ppa:smoser/cloud-init-test",
+ ],
+ target=None,
)
# adding ppa should ignore filename (uses add-apt-repository)
@@ -689,15 +694,27 @@ class TestAptSourceConfig(TestCase):
cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
calls = [
call(
- ["add-apt-repository", "ppa:smoser/cloud-init-test"],
+ [
+ "add-apt-repository",
+ "--no-update",
+ "ppa:smoser/cloud-init-test",
+ ],
target=None,
),
call(
- ["add-apt-repository", "ppa:smoser/cloud-init-test2"],
+ [
+ "add-apt-repository",
+ "--no-update",
+ "ppa:smoser/cloud-init-test2",
+ ],
target=None,
),
call(
- ["add-apt-repository", "ppa:smoser/cloud-init-test3"],
+ [
+ "add-apt-repository",
+ "--no-update",
+ "ppa:smoser/cloud-init-test3",
+ ],
target=None,
),
]
diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py
index 75adc647..8aceff06 100644
--- a/tests/unittests/config/test_apt_source_v3.py
+++ b/tests/unittests/config/test_apt_source_v3.py
@@ -601,7 +601,12 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
cfg, TARGET, template_params=params, aa_repo_match=self.matcher
)
mockobj.assert_any_call(
- ["add-apt-repository", "ppa:smoser/cloud-init-test"], target=TARGET
+ [
+ "add-apt-repository",
+ "--no-update",
+ "ppa:smoser/cloud-init-test",
+ ],
+ target=TARGET,
)
# adding ppa should ignore filename (uses add-apt-repository)
@@ -622,15 +627,27 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
)
calls = [
call(
- ["add-apt-repository", "ppa:smoser/cloud-init-test"],
+ [
+ "add-apt-repository",
+ "--no-update",
+ "ppa:smoser/cloud-init-test",
+ ],
target=TARGET,
),
call(
- ["add-apt-repository", "ppa:smoser/cloud-init-test2"],
+ [
+ "add-apt-repository",
+ "--no-update",
+ "ppa:smoser/cloud-init-test2",
+ ],
target=TARGET,
),
call(
- ["add-apt-repository", "ppa:smoser/cloud-init-test3"],
+ [
+ "add-apt-repository",
+ "--no-update",
+ "ppa:smoser/cloud-init-test3",
+ ],
target=TARGET,
),
]
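
Both apt-source test files pin the new --no-update flag the same way:
mock records the exact argv passed to subp, so extending the expected
list is sufficient. A condensed example with a hypothetical PPA name:

    from unittest import mock

    m_subp = mock.Mock()
    m_subp(
        ["add-apt-repository", "--no-update", "ppa:example/ppa"], target=None
    )
    m_subp.assert_called_once_with(
        ["add-apt-repository", "--no-update", "ppa:example/ppa"], target=None
    )
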
diff --git a/tests/unittests/config/test_cc_fan.py b/tests/unittests/config/test_cc_fan.py
new file mode 100644
index 00000000..3d0730d5
--- /dev/null
+++ b/tests/unittests/config/test_cc_fan.py
@@ -0,0 +1,33 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests cc_fan module"""
+
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestFanSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ ({"fan": {"config": ["a", "b"]}}, "is not of type 'string'"),
+ (
+ {"fan": {"config_path": "/a/b"}},
+ "'config' is a required property",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
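
The new cc_fan tests adopt the schema-test template used across this
import: error_msg is a regex matched against the raised
SchemaValidationError, and None marks a config expected to validate
cleanly (the fan cases currently exercise only the error branch). The
skeleton, reduced to its essentials with `validate` standing in for
validate_cloudconfig_schema:

    import pytest

    def check_schema(config, error_msg, schema, validate):
        if error_msg is None:
            validate(config, schema, strict=True)  # must not raise
        else:
            with pytest.raises(Exception, match=error_msg):
                validate(config, schema, strict=True)
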
diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py
index ba66f136..24e92c88 100644
--- a/tests/unittests/config/test_cc_growpart.py
+++ b/tests/unittests/config/test_cc_growpart.py
@@ -8,11 +8,19 @@ import shutil
import stat
import unittest
from contextlib import ExitStack
+from itertools import chain
from unittest import mock
+import pytest
+
from cloudinit import cloud, subp, temp_utils
from cloudinit.config import cc_growpart
-from tests.unittests.helpers import TestCase
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import TestCase, skipUnlessJsonSchema
# growpart:
# mode: auto # off, on, auto, 'growpart'
@@ -312,8 +320,8 @@ class TestResize(unittest.TestCase):
raise e
return real_stat(path)
+ opinfo = cc_growpart.device_part_info
try:
- opinfo = cc_growpart.device_part_info
cc_growpart.device_part_info = simple_device_part_info
os.stat = mystat
@@ -342,6 +350,233 @@ class TestResize(unittest.TestCase):
os.stat = real_stat
+class TestEncrypted:
+ """Attempt end-to-end scenarios using encrypted devices.
+
+ Things are mocked such that:
+ - "/fake_encrypted" is mounted onto "/dev/mapper/fake"
+ - "/dev/mapper/fake" is a LUKS device and symlinked to /dev/dm-1
+ - The partition backing "/dev/mapper/fake" is "/dev/vdx1"
+ - "/" is not encrypted and mounted onto "/dev/vdz1"
+
+ Note that we don't (yet) support non-encrypted mapped drives, such
+ as LVM volumes. If our mount point is /dev/mapper/*, then we will
+ not resize it if it is not encrypted.
+ """
+
+ def _subp_side_effect(self, value, good=True, **kwargs):
+ if value[0] == "dmsetup":
+ return ("1 dependencies : (vdx1)",)
+ return mock.Mock()
+
+ def _device_part_info_side_effect(self, value):
+ if value.startswith("/dev/mapper/"):
+ raise TypeError(f"{value} not a partition")
+ return (1024, 1024)
+
+ def _devent2dev_side_effect(self, value):
+ if value == "/fake_encrypted":
+ return "/dev/mapper/fake"
+ elif value == "/":
+ return "/dev/vdz"
+ elif value.startswith("/dev"):
+ return value
+ raise Exception(f"unexpected value {value}")
+
+ def _realpath_side_effect(self, value):
+ return "/dev/dm-1" if value.startswith("/dev/mapper") else value
+
+ def assert_resize_and_cleanup(self):
+ all_subp_args = list(
+ chain(*[args[0][0] for args in self.m_subp.call_args_list])
+ )
+ assert "resize" in all_subp_args
+ assert "luksKillSlot" in all_subp_args
+ self.m_unlink.assert_called_once()
+
+ def assert_no_resize_or_cleanup(self):
+ all_subp_args = list(
+ chain(*[args[0][0] for args in self.m_subp.call_args_list])
+ )
+ assert "resize" not in all_subp_args
+ assert "luksKillSlot" not in all_subp_args
+ self.m_unlink.assert_not_called()
+
+ @pytest.fixture
+ def common_mocks(self, mocker):
+ # These are all "happy path" mocks which will get overridden
+ # when needed
+ mocker.patch(
+ "cloudinit.config.cc_growpart.device_part_info",
+ side_effect=self._device_part_info_side_effect,
+ )
+ mocker.patch("os.stat")
+ mocker.patch("stat.S_ISBLK")
+ mocker.patch("stat.S_ISCHR")
+ mocker.patch(
+ "cloudinit.config.cc_growpart.devent2dev",
+ side_effect=self._devent2dev_side_effect,
+ )
+ mocker.patch(
+ "os.path.realpath", side_effect=self._realpath_side_effect
+ )
+ # Only place subp.which is used in cc_growpart is for cryptsetup
+ mocker.patch(
+ "cloudinit.config.cc_growpart.subp.which",
+ return_value="/usr/sbin/cryptsetup",
+ )
+ self.m_subp = mocker.patch(
+ "cloudinit.config.cc_growpart.subp.subp",
+ side_effect=self._subp_side_effect,
+ )
+ mocker.patch(
+ "pathlib.Path.open",
+ new_callable=mock.mock_open,
+ read_data=(
+ '{"key":"XFmCwX2FHIQp0LBWaLEMiHIyfxt1SGm16VvUAVledlY=",'
+ '"slot":5}'
+ ),
+ )
+ mocker.patch("pathlib.Path.exists", return_value=True)
+ self.m_unlink = mocker.patch("pathlib.Path.unlink", autospec=True)
+
+ self.resizer = mock.Mock()
+ self.resizer.resize = mock.Mock(return_value=(1024, 1024))
+
+ def test_resize_when_encrypted(self, common_mocks, caplog):
+ info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"])
+ assert len(info) == 2
+ assert info[0][0] == "/dev/vdx1"
+ assert info[0][2].startswith("no change necessary")
+ assert info[1][0] == "/fake_encrypted"
+ assert (
+ info[1][2]
+ == "Successfully resized encrypted volume '/dev/mapper/fake'"
+ )
+ assert (
+ "/dev/mapper/fake is a mapped device pointing to /dev/dm-1"
+ in caplog.text
+ )
+ assert "Determined that /dev/dm-1 is encrypted" in caplog.text
+
+ self.assert_resize_and_cleanup()
+
+ def test_resize_when_unencrypted(self, common_mocks):
+ info = cc_growpart.resize_devices(self.resizer, ["/"])
+ assert len(info) == 1
+ assert info[0][0] == "/"
+ assert "encrypted" not in info[0][2]
+ self.assert_no_resize_or_cleanup()
+
+ def test_encrypted_but_cryptsetup_not_found(
+ self, common_mocks, mocker, caplog
+ ):
+ mocker.patch(
+ "cloudinit.config.cc_growpart.subp.which",
+ return_value=None,
+ )
+ info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"])
+
+ assert len(info) == 1
+ assert "skipped as it is not encrypted" in info[0][2]
+ assert "cryptsetup not found" in caplog.text
+ self.assert_no_resize_or_cleanup()
+
+ def test_dmsetup_not_found(self, common_mocks, mocker, caplog):
+ def _subp_side_effect(value, **kwargs):
+ if value[0] == "dmsetup":
+ raise subp.ProcessExecutionError()
+
+ mocker.patch(
+ "cloudinit.config.cc_growpart.subp.subp",
+ side_effect=_subp_side_effect,
+ )
+ info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"])
+ assert len(info) == 1
+ assert info[0][0] == "/fake_encrypted"
+ assert info[0][1] == "FAILED"
+ assert (
+ "Resizing encrypted device (/dev/mapper/fake) failed" in info[0][2]
+ )
+ self.assert_no_resize_or_cleanup()
+
+ def test_unparsable_dmsetup(self, common_mocks, mocker, caplog):
+ def _subp_side_effect(value, **kwargs):
+ if value[0] == "dmsetup":
+ return ("2 dependencies",)
+ return mock.Mock()
+
+ mocker.patch(
+ "cloudinit.config.cc_growpart.subp.subp",
+ side_effect=_subp_side_effect,
+ )
+ info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"])
+ assert len(info) == 1
+ assert info[0][0] == "/fake_encrypted"
+ assert info[0][1] == "FAILED"
+ assert (
+ "Resizing encrypted device (/dev/mapper/fake) failed" in info[0][2]
+ )
+ self.assert_no_resize_or_cleanup()
+
+ def test_missing_keydata(self, common_mocks, mocker, caplog):
+ # Note that this will be standard behavior after first boot
+ # on a system with an encrypted root partition
+ mocker.patch("pathlib.Path.open", side_effect=FileNotFoundError())
+ info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"])
+ assert len(info) == 2
+ assert info[0][0] == "/dev/vdx1"
+ assert info[0][2].startswith("no change necessary")
+ assert info[1][0] == "/fake_encrypted"
+ assert info[1][1] == "FAILED"
+ assert (
+ info[1][2]
+ == "Resizing encrypted device (/dev/mapper/fake) failed: Could "
+ "not load encryption key. This is expected if the volume has "
+ "been previously resized."
+ )
+ self.assert_no_resize_or_cleanup()
+
+ def test_resize_failed(self, common_mocks, mocker, caplog):
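+        # dmsetup succeeds but "cryptsetup resize" fails; the temporary
+        # keyslot and keyfile must still be cleaned up afterwards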
+ def _subp_side_effect(value, **kwargs):
+ if value[0] == "dmsetup":
+ return ("1 dependencies : (vdx1)",)
+ elif value[0] == "cryptsetup" and "resize" in value:
+ raise subp.ProcessExecutionError()
+ return mock.Mock()
+
+ self.m_subp = mocker.patch(
+ "cloudinit.config.cc_growpart.subp.subp",
+ side_effect=_subp_side_effect,
+ )
+
+ info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"])
+ assert len(info) == 2
+ assert info[0][0] == "/dev/vdx1"
+ assert info[0][2].startswith("no change necessary")
+ assert info[1][0] == "/fake_encrypted"
+ assert info[1][1] == "FAILED"
+ assert (
+ "Resizing encrypted device (/dev/mapper/fake) failed" in info[1][2]
+ )
+        # Assert we still clean up the keyslot and keyfile
+ all_subp_args = list(
+ chain(*[args[0][0] for args in self.m_subp.call_args_list])
+ )
+ assert "luksKillSlot" in all_subp_args
+ self.m_unlink.assert_called_once()
+
+ def test_resize_skipped(self, common_mocks, mocker, caplog):
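+        # Without a keyfile present, the encrypted volume is reported
+        # as SKIPPED rather than FAILED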
+ mocker.patch("pathlib.Path.exists", return_value=False)
+ info = cc_growpart.resize_devices(self.resizer, ["/fake_encrypted"])
+ assert len(info) == 2
+ assert info[1] == (
+ "/fake_encrypted",
+ "SKIPPED",
+ "No encryption keyfile found",
+ )
+
+
def simple_device_part_info(devpath):
    # simple stupid return (/dev/vda, 1) for /dev/vda1
ret = re.search("([^0-9]*)([0-9]*)$", devpath)
@@ -354,4 +589,39 @@ class Bunch(object):
self.__dict__.update(kwds)
-# vi: ts=4 expandtab
+class TestGrowpartSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
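+            # Both the string "off" and boolean False are valid values
+            # for disabling growpart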
+ ({"growpart": {"mode": "off"}}, None),
+ ({"growpart": {"mode": False}}, None),
+ (
+ {"growpart": {"mode": "false"}},
+ "'false' is not one of "
+ r"\[False, 'auto', 'growpart', 'gpart', 'off'\]",
+ ),
+ (
+ {"growpart": {"mode": "a"}},
+ "'a' is not one of "
+ r"\[False, 'auto', 'growpart', 'gpart', 'off'\]",
+ ),
+ ({"growpart": {"devices": "/"}}, "'/' is not of type 'array'"),
+ (
+ {"growpart": {"ignore_growroot_disabled": "off"}},
+ "'off' is not of type 'boolean'",
+ ),
+ (
+ {"growpart": {"a": "b"}},
+ "Additional properties are not allowed",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py
index 5151a7b5..9bdc9c74 100644
--- a/tests/unittests/config/test_cc_grub_dpkg.py
+++ b/tests/unittests/config/test_cc_grub_dpkg.py
@@ -6,7 +6,13 @@ from unittest import mock
import pytest
from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from cloudinit.subp import ProcessExecutionError
+from tests.unittests.helpers import skipUnlessJsonSchema
class TestFetchIdevs:
@@ -141,7 +147,7 @@ class TestHandle:
(
# idevs set, idevs_empty set
"/dev/vda",
- "false",
+ False,
"/dev/disk/by-id/company-user-1",
(
"Setting grub debconf-set-selections with ",
@@ -152,7 +158,7 @@ class TestHandle:
# idevs set, idevs_empty set
                # Respect what the user defines, even if it's logically wrong
"/dev/nvme0n1",
- "true",
+ True,
"",
(
"Setting grub debconf-set-selections with ",
@@ -162,14 +168,12 @@ class TestHandle:
],
)
@mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
- @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
@mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
def test_handle(
self,
m_subp,
m_logexc,
- m_get_cfg_str,
m_fetch_idevs,
cfg_idevs,
cfg_idevs_empty,
@@ -177,11 +181,39 @@ class TestHandle:
expected_log_output,
):
"""Test setting of correct debconf database entries"""
- m_get_cfg_str.side_effect = [cfg_idevs, cfg_idevs_empty]
m_fetch_idevs.return_value = fetch_idevs_output
log = mock.Mock(spec=Logger)
- handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock())
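+        # Build cfg directly: handle() now reads these keys from the
+        # config dict instead of via util.get_cfg_option_str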
+ cfg = {"grub_dpkg": {}}
+ if cfg_idevs is not None:
+ cfg["grub_dpkg"]["grub-pc/install_devices"] = cfg_idevs
+ if cfg_idevs_empty is not None:
+ cfg["grub_dpkg"]["grub-pc/install_devices_empty"] = cfg_idevs_empty
+ handle(mock.Mock(), cfg, mock.Mock(), log, mock.Mock())
log.debug.assert_called_with("".join(expected_log_output))
-# vi: ts=4 expandtab
+class TestGrubDpkgSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ ({"grub_dpkg": {"grub-pc/install_devices_empty": False}}, None),
+ ({"grub_dpkg": {"grub-pc/install_devices_empty": "off"}}, None),
+ (
+ {"grub_dpkg": {"enabled": "yes"}},
+ "'yes' is not of type 'boolean'",
+ ),
+ (
+ {"grub_dpkg": {"grub-pc/install_devices": ["/dev/sda"]}},
+ r"\['/dev/sda'\] is not of type 'string'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
diff --git a/tests/unittests/config/test_cc_keyboard.py b/tests/unittests/config/test_cc_keyboard.py
new file mode 100644
index 00000000..00fad9ff
--- /dev/null
+++ b/tests/unittests/config/test_cc_keyboard.py
@@ -0,0 +1,77 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_keyboard module"""
+
+import re
+
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestKeyboard:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas
+ ({"keyboard": {"layout": "somestring"}}, None),
+ # Invalid schemas
+ (
+ {"keyboard": {}},
+ "Cloud config schema errors: keyboard: 'layout' is a"
+ " required property",
+ ),
+ (
+ {"keyboard": "bogus"},
+ "Cloud config schema errors: keyboard: 'bogus' is not"
+ " of type 'object'",
+ ),
+ (
+ {"keyboard": {"layout": 1}},
+ "Cloud config schema errors: keyboard.layout: 1 is not"
+ " of type 'string'",
+ ),
+ (
+ {"keyboard": {"layout": "somestr", "model": None}},
+ "Cloud config schema errors: keyboard.model: None is not"
+ " of type 'string'",
+ ),
+ (
+ {"keyboard": {"layout": "somestr", "variant": [1]}},
+ re.escape(
+ "Cloud config schema errors: keyboard.variant: [1] is"
+ " not of type 'string'"
+ ),
+ ),
+ (
+ {"keyboard": {"layout": "somestr", "options": {}}},
+ "Cloud config schema errors: keyboard.options: {} is not"
+ " of type 'string'",
+ ),
+ (
+ {"keyboard": {"layout": "somestr", "extraprop": "somestr"}},
+ re.escape(
+ "Cloud config schema errors: keyboard: Additional"
+ " properties are not allowed ('extraprop' was unexpected)"
+ ),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_keys_to_console.py b/tests/unittests/config/test_cc_keys_to_console.py
index 9efc2b48..61f62e96 100644
--- a/tests/unittests/config/test_cc_keys_to_console.py
+++ b/tests/unittests/config/test_cc_keys_to_console.py
@@ -1,9 +1,16 @@
"""Tests for cc_keys_to_console."""
-from unittest import mock
+
+import re
import pytest
from cloudinit.config import cc_keys_to_console
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import mock, skipUnlessJsonSchema
class TestHandle:
@@ -38,3 +45,75 @@ class TestHandle:
cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ())
assert subp_called == (m_subp.call_count == 1)
+
+
+class TestKeysToConsoleSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+            # Valid schemas are covered by meta['examples'] tests in test_schema
+ # Invalid schemas
+ (
+ {"ssh": {}},
+ "Cloud config schema errors: ssh: 'emit_keys_to_console' is"
+ " a required property",
+ ),
+            ( # Avoid common failure of giving string 'false' for false
+                {"ssh": {"emit_keys_to_console": "false"}},
+                "Cloud config schema errors: ssh.emit_keys_to_console: 'false'"
+                " is not of type 'boolean'",
+            ),
+            (
+                {"ssh": {"noextraprop": False, "emit_keys_to_console": False}},
+                re.escape(
+                    "Cloud config schema errors: ssh: Additional properties"
+                    " are not allowed ('noextraprop' was unexpected)"
+                ),
+            ),
+            ( # Blacklists must be arrays, not booleans
+                {"ssh_key_console_blacklist": False},
+                "Cloud config schema errors: ssh_key_console_blacklist: False"
+                " is not of type 'array'",
+            ),
+            ( # Blacklist entries must be strings
+                {"ssh_key_console_blacklist": [1]},
+                "Cloud config schema errors: ssh_key_console_blacklist.0: 1 is"
+                " not of type 'string'",
+            ),
+            ( # Blacklists must be arrays, not None
+                {"ssh_fp_console_blacklist": None},
+                "Cloud config schema errors: ssh_fp_console_blacklist: None"
+                " is not of type 'array'",
+            ),
+            ( # Blacklist entries must be strings
+                {"ssh_fp_console_blacklist": [1]},
+                "Cloud config schema errors: ssh_fp_console_blacklist.0: 1 is"
+                " not of type 'string'",
+            ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_landscape.py b/tests/unittests/config/test_cc_landscape.py
index efddc1b6..79ea6b0a 100644
--- a/tests/unittests/config/test_cc_landscape.py
+++ b/tests/unittests/config/test_cc_landscape.py
@@ -1,13 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
import logging
+import pytest
from configobj import ConfigObj
from cloudinit import util
from cloudinit.config import cc_landscape
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests.helpers import (
FilesystemMockingTestCase,
mock,
+ skipUnlessJsonSchema,
wrap_and_call,
)
from tests.unittests.util import get_cloud
@@ -168,3 +175,30 @@ class TestLandscape(FilesystemMockingTestCase):
"Wrote landscape config file to {0}".format(self.conf),
self.logs.getvalue(),
)
+
+
+class TestLandscapeSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+            # Allow undocumented keys under 'client' without error
+ ({"landscape": {"client": {"allow_additional_keys": 1}}}, None),
+ # tags are comma-delimited
+ ({"landscape": {"client": {"tags": "1,2,3"}}}, None),
+ ({"landscape": {"client": {"tags": "1"}}}, None),
+ # Require client key
+ ({"landscape": {}}, "'client' is a required property"),
+ # tags are not whitespace-delimited
+ (
+ {"landscape": {"client": {"tags": "1, 2,3"}}},
+ "'1, 2,3' does not match",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
diff --git a/tests/unittests/config/test_cc_locale.py b/tests/unittests/config/test_cc_locale.py
index 7190bc68..d64610b6 100644
--- a/tests/unittests/config/test_cc_locale.py
+++ b/tests/unittests/config/test_cc_locale.py
@@ -8,19 +8,28 @@ import os
import shutil
import tempfile
from io import BytesIO
-from unittest import mock
+import pytest
from configobj import ConfigObj
from cloudinit import util
from cloudinit.config import cc_locale
-from tests.unittests import helpers as t_help
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
-class TestLocale(t_help.FilesystemMockingTestCase):
+class TestLocale(FilesystemMockingTestCase):
def setUp(self):
super(TestLocale, self).setUp()
self.new_root = tempfile.mkdtemp()
@@ -120,4 +129,27 @@ class TestLocale(t_help.FilesystemMockingTestCase):
)
+class TestLocaleSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested via meta['examples'] in test_schema.py
+ # Invalid schemas
+ ({"locale": 1}, "locale: 1 is not of type 'string'"),
+ (
+ {"locale_configfile": 1},
+ "locale_configfile: 1 is not of type 'string'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_lxd.py b/tests/unittests/config/test_cc_lxd.py
index 720274d6..3b444127 100644
--- a/tests/unittests/config/test_cc_lxd.py
+++ b/tests/unittests/config/test_cc_lxd.py
@@ -1,7 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import re
from unittest import mock
+import pytest
+
from cloudinit.config import cc_lxd
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests import helpers as t_help
from tests.unittests.util import get_cloud
@@ -269,4 +277,27 @@ class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
)
+class TestLXDSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Only allow init and bridge keys
+ ({"lxd": {"bridgeo": 1}}, "Additional properties are not allowed"),
+ # Only allow init.storage_backend values zfs and dir
+ (
+ {"lxd": {"init": {"storage_backend": "1zfs"}}},
+ re.escape("not one of ['zfs', 'dir']"),
+ ),
+ # Require bridge.mode
+ ({"lxd": {"bridge": {}}}, "bridge: 'mode' is a required property"),
+ # Require init or bridge keys
+ ({"lxd": {}}, "does not have enough properties"),
+ ],
+ )
+ @t_help.skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_mcollective.py b/tests/unittests/config/test_cc_mcollective.py
index 5cbdeb76..aa726dd3 100644
--- a/tests/unittests/config/test_cc_mcollective.py
+++ b/tests/unittests/config/test_cc_mcollective.py
@@ -6,9 +6,15 @@ import tempfile
from io import BytesIO
import configobj
+import pytest
from cloudinit import util
from cloudinit.config import cc_mcollective
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests import helpers as t_help
from tests.unittests.util import get_cloud
@@ -155,4 +161,30 @@ class TestHandler(t_help.TestCase):
)
+class TestMcollectiveSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+            # Disallow undocumented keys directly under 'mcollective'
+ (
+ {"mcollective": {"customkey": True}},
+ "mcollective: Additional properties are not allowed",
+ ),
+            # Allow undocumented keys below 'conf' without error
+ ({"mcollective": {"conf": {"customkey": 1}}}, None),
+ (
+ {"mcollective": {"conf": {"public-cert": 1}}},
+ "mcollective.conf.public-cert: 1 is not of type 'string'",
+ ),
+ ],
+ )
+ @t_help.skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py
index 084faacd..8ae28099 100644
--- a/tests/unittests/config/test_cc_mounts.py
+++ b/tests/unittests/config/test_cc_mounts.py
@@ -1,12 +1,18 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os.path
+import re
from unittest import mock
import pytest
from cloudinit.config import cc_mounts
from cloudinit.config.cc_mounts import create_swapfile
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from cloudinit.subp import ProcessExecutionError
from tests.unittests import helpers as test_helpers
@@ -519,4 +525,56 @@ class TestCreateSwapfile:
assert msg in caplog.text
+class TestMountsSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+            # Require at least one mount entry when 'mounts' is provided.
+ ({"mounts": []}, re.escape("mounts: [] is too short")),
+ # Disallow less than 1 item per mount entry
+ ({"mounts": [[]]}, re.escape("mounts.0: [] is too short")),
+ # Disallow more than 6 items per mount entry
+ ({"mounts": [["1"] * 7]}, "mounts.0:.* is too long"),
+            # Disallow mount_default_fields with anything other than 6 items
+ (
+ {"mount_default_fields": ["1"] * 5},
+ "mount_default_fields:.* is too short",
+ ),
+ (
+ {"mount_default_fields": ["1"] * 7},
+ "mount_default_fields:.* is too long",
+ ),
+ (
+ {"swap": {"invalidprop": True}},
+ re.escape(
+ "Additional properties are not allowed ('invalidprop'"
+ ),
+ ),
+ # Swap size/maxsize positive test cases
+ ({"swap": {"size": ".5T", "maxsize": ".5T"}}, None),
+ ({"swap": {"size": "1G", "maxsize": "1G"}}, None),
+ ({"swap": {"size": "200K", "maxsize": "200K"}}, None),
+ ({"swap": {"size": "10485760B", "maxsize": "10485760B"}}, None),
+ # Swap size/maxsize negative test cases
+ ({"swap": {"size": "1.5MB"}}, "swap.size:"),
+ (
+ {"swap": {"maxsize": "1.5MT"}},
+ "swap.maxsize: '1.5MT' is not valid",
+ ),
+ (
+ {"swap": {"maxsize": "..5T"}},
+ "swap.maxsize: '..5T' is not valid",
+ ),
+ ({"swap": {"size": "K"}}, "swap.size: 'K' is not valid"),
+ ],
+ )
+ @test_helpers.skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ntp.py b/tests/unittests/config/test_cc_ntp.py
index fba141aa..c2bce2a3 100644
--- a/tests/unittests/config/test_cc_ntp.py
+++ b/tests/unittests/config/test_cc_ntp.py
@@ -1,12 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
import os
+import re
import shutil
from functools import partial
from os.path import dirname
+import pytest
+
from cloudinit import helpers, util
from cloudinit.config import cc_ntp
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests.helpers import (
CiTestCase,
FilesystemMockingTestCase,
@@ -389,119 +397,6 @@ class TestNtp(FilesystemMockingTestCase):
"Invalid cloud-config provided:", self.logs.getvalue()
)
- @skipUnlessJsonSchema()
- @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
- def test_ntp_handler_schema_validation_warns_non_string_item_type(
- self, m_sel
- ):
- """Ntp schema validation warns of non-strings in pools or servers.
-
- Schema validation is not strict, so ntp config is still be rendered.
- """
- invalid_config = {"ntp": {"pools": [123], "servers": ["valid", None]}}
- for distro in cc_ntp.distros:
- mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig["confpath"]
- m_sel.return_value = ntpconfig
- cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
- self.assertIn(
- "Invalid cloud-config provided:\nntp.pools.0: 123 is not of"
- " type 'string'\nntp.servers.1: None is not of type 'string'",
- self.logs.getvalue(),
- )
- self.assertEqual(
- "servers ['valid', None]\npools [123]\n",
- util.load_file(confpath),
- )
-
- @skipUnlessJsonSchema()
- @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
- def test_ntp_handler_schema_validation_warns_of_non_array_type(
- self, m_select
- ):
- """Ntp schema validation warns of non-array pools or servers types.
-
- Schema validation is not strict, so ntp config is still be rendered.
- """
- invalid_config = {"ntp": {"pools": 123, "servers": "non-array"}}
-
- for distro in cc_ntp.distros:
- mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig["confpath"]
- m_select.return_value = ntpconfig
- cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
- self.assertIn(
- "Invalid cloud-config provided:\nntp.pools: 123 is not of type"
- " 'array'\nntp.servers: 'non-array' is not of type 'array'",
- self.logs.getvalue(),
- )
- self.assertEqual(
- "servers non-array\npools 123\n", util.load_file(confpath)
- )
-
- @skipUnlessJsonSchema()
- @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
- def test_ntp_handler_schema_validation_warns_invalid_key_present(
- self, m_select
- ):
- """Ntp schema validation warns of invalid keys present in ntp config.
-
- Schema validation is not strict, so ntp config is still be rendered.
- """
- invalid_config = {
- "ntp": {"invalidkey": 1, "pools": ["0.mycompany.pool.ntp.org"]}
- }
- for distro in cc_ntp.distros:
- if distro != "alpine":
- mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig["confpath"]
- m_select.return_value = ntpconfig
- cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
- self.assertIn(
- "Invalid cloud-config provided:\nntp: Additional"
- " properties are not allowed ('invalidkey' was"
- " unexpected)",
- self.logs.getvalue(),
- )
- self.assertEqual(
- "servers []\npools ['0.mycompany.pool.ntp.org']\n",
- util.load_file(confpath),
- )
-
- @skipUnlessJsonSchema()
- @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
- def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select):
- """Ntp schema validation warns of duplicates in servers or pools.
-
- Schema validation is not strict, so ntp config is still be rendered.
- """
- invalid_config = {
- "ntp": {
- "pools": ["0.mypool.org", "0.mypool.org"],
- "servers": ["10.0.0.1", "10.0.0.1"],
- }
- }
- for distro in cc_ntp.distros:
- mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig["confpath"]
- m_select.return_value = ntpconfig
- cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
- self.assertIn(
- "Invalid cloud-config provided:\nntp.pools: ['0.mypool.org',"
- " '0.mypool.org'] has non-unique elements\nntp.servers: "
- "['10.0.0.1', '10.0.0.1'] has non-unique elements",
- self.logs.getvalue(),
- )
- self.assertEqual(
- "servers ['10.0.0.1', '10.0.0.1']\n"
- "pools ['0.mypool.org', '0.mypool.org']\n",
- util.load_file(confpath),
- )
-
@mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
def test_ntp_handler_timesyncd(self, m_select):
"""Test ntp handler configures timesyncd"""
@@ -867,4 +762,59 @@ class TestSupplementalSchemaValidation(CiTestCase):
self.assertIn(error, error_msg)
+class TestNTPSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Allow empty ntp config
+ ({"ntp": None}, None),
+ (
+ {
+ "ntp": {
+ "invalidkey": 1,
+ "pools": ["0.mycompany.pool.ntp.org"],
+ }
+ },
+ re.escape(
+ "ntp: Additional properties are not allowed ('invalidkey'"
+ ),
+ ),
+ (
+ {
+ "ntp": {
+ "pools": ["0.mypool.org", "0.mypool.org"],
+ "servers": ["10.0.0.1", "10.0.0.1"],
+ }
+ },
+ re.escape(
+ "ntp.pools: ['0.mypool.org', '0.mypool.org'] has"
+ " non-unique elements"
+ ),
+ ),
+ (
+ {
+ "ntp": {
+ "pools": [123],
+ "servers": ["www.example.com", None],
+ }
+ },
+ "ntp.pools.0: 123 is not of type 'string'.*"
+ "ntp.servers.1: None is not of type 'string'",
+ ),
+ (
+ {"ntp": {"pools": 123, "servers": "non-array"}},
+ "ntp.pools: 123 is not of type 'array'.*"
+ "ntp.servers: 'non-array' is not of type 'array'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_package_update_upgrade_install.py b/tests/unittests/config/test_cc_package_update_upgrade_install.py
new file mode 100644
index 00000000..1bdddfcc
--- /dev/null
+++ b/tests/unittests/config/test_cc_package_update_upgrade_install.py
@@ -0,0 +1,26 @@
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestPackageUpdateUpgradeSchema:
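+    """All cases below are invalid; match="" accepts any error message."""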
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # packages list with single entry (2 required)
+ ({"packages": ["p1", ["p2"]]}, ""),
+ # packages list with three entries (2 required)
+ ({"packages": ["p1", ["p2", "p3", "p4"]]}, ""),
+ # empty packages list
+ ({"packages": []}, "is too short"),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
diff --git a/tests/unittests/config/test_cc_phone_home.py b/tests/unittests/config/test_cc_phone_home.py
new file mode 100644
index 00000000..7264dda1
--- /dev/null
+++ b/tests/unittests/config/test_cc_phone_home.py
@@ -0,0 +1,26 @@
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestPhoneHomeSchema:
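+    """Every parametrized config below should fail schema validation."""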
+ @pytest.mark.parametrize(
+ "config",
+ [
+            # phone_home missing required url key
+ {"phone_home": {"post": ["pub_key_dsa"]}},
+ # post using string other than "all"
+ {"phone_home": {"url": "test_url", "post": "pub_key_dsa"}},
+ # post using list with misspelled entry
+ {"phone_home": {"url": "test_url", "post": ["pub_kye_dsa"]}},
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config):
+ with pytest.raises(SchemaValidationError):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
diff --git a/tests/unittests/config/test_cc_power_state_change.py b/tests/unittests/config/test_cc_power_state_change.py
index 47eb0d58..cdd36fe0 100644
--- a/tests/unittests/config/test_cc_power_state_change.py
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -2,10 +2,17 @@
import sys
+import pytest
+
from cloudinit import distros, helpers
from cloudinit.config import cc_power_state_change as psc
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests import helpers as t_help
-from tests.unittests.helpers import mock
+from tests.unittests.helpers import mock, skipUnlessJsonSchema
class TestLoadPowerState(t_help.TestCase):
@@ -156,4 +163,40 @@ def check_lps_ret(psc_return, mode=None):
raise Exception("\n".join(lines))
+class TestPowerStateChangeSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Invalid mode
+ (
+ {"power_state": {"mode": "test"}},
+ r"'test' is not one of \['poweroff', 'reboot', 'halt'\]",
+ ),
+ # Delay can be a number, a +number, or "now"
+ ({"power_state": {"mode": "halt", "delay": "5"}}, None),
+ ({"power_state": {"mode": "halt", "delay": "now"}}, None),
+ ({"power_state": {"mode": "halt", "delay": "+5"}}, None),
+ ({"power_state": {"mode": "halt", "delay": "+"}}, ""),
+ ({"power_state": {"mode": "halt", "delay": "++5"}}, ""),
+ ({"power_state": {"mode": "halt", "delay": "-5"}}, ""),
+ ({"power_state": {"mode": "halt", "delay": "test"}}, ""),
+ # Condition
+ ({"power_state": {"mode": "halt", "condition": False}}, None),
+ ({"power_state": {"mode": "halt", "condition": "ls /tmp"}}, None),
+ (
+ {"power_state": {"mode": "halt", "condition": ["ls", "/tmp"]}},
+ None,
+ ),
+ ({"power_state": {"mode": "halt", "condition": 5}}, ""),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py
index 2c4481da..72e031ba 100644
--- a/tests/unittests/config/test_cc_puppet.py
+++ b/tests/unittests/config/test_cc_puppet.py
@@ -2,18 +2,33 @@
import logging
import textwrap
+import pytest
+
from cloudinit import util
from cloudinit.config import cc_puppet
-from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ CiTestCase,
+ HttprettyTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
+@mock.patch("cloudinit.config.cc_puppet.subp.which")
@mock.patch("cloudinit.config.cc_puppet.subp.subp")
@mock.patch("cloudinit.config.cc_puppet.os")
class TestAutostartPuppet(CiTestCase):
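+    """_autostart_puppet now locates systemctl via subp.which (m_subpw)
+    instead of probing /bin/systemctl on disk."""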
- def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
+ def test_wb_autostart_puppet_updates_puppet_default(
+ self, m_os, m_subp, m_subpw
+ ):
"""Update /etc/default/puppet to autostart if it exists."""
def _fake_exists(path):
@@ -37,27 +52,28 @@ class TestAutostartPuppet(CiTestCase):
m_subp.call_args_list,
)
- def test_wb_autostart_pupppet_enables_puppet_systemctl(self, m_os, m_subp):
+ def test_wb_autostart_pupppet_enables_puppet_systemctl(
+ self, m_os, m_subp, m_subpw
+ ):
"""If systemctl is present, enable puppet via systemctl."""
- def _fake_exists(path):
- return path == "/bin/systemctl"
-
- m_os.path.exists.side_effect = _fake_exists
+ m_os.path.exists.return_value = False
+ m_subpw.return_value = "/usr/bin/systemctl"
cc_puppet._autostart_puppet(LOG)
expected_calls = [
- mock.call(
- ["/bin/systemctl", "enable", "puppet.service"], capture=False
- )
+ mock.call(["systemctl", "enable", "puppet.service"], capture=False)
]
self.assertEqual(expected_calls, m_subp.call_args_list)
- def test_wb_autostart_pupppet_enables_puppet_chkconfig(self, m_os, m_subp):
+ def test_wb_autostart_pupppet_enables_puppet_chkconfig(
+ self, m_os, m_subp, m_subpw
+ ):
"""If chkconfig is present, enable puppet via checkcfg."""
def _fake_exists(path):
return path == "/sbin/chkconfig"
+ m_subpw.return_value = None
m_os.path.exists.side_effect = _fake_exists
cc_puppet._autostart_puppet(LOG)
expected_calls = [
@@ -448,3 +464,74 @@ class TestInstallPuppetAio(HttprettyTestCase):
self.assertEqual(
[mock.call([mock.ANY], capture=False)], m_subp.call_args_list
)
+
+
+class TestPuppetSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Some validity checks
+ ({"puppet": {"conf": {"main": {"key": "val"}}}}, None),
+ ({"puppet": {"conf": {"server": {"key": "val"}}}}, None),
+ ({"puppet": {"conf": {"agent": {"key": "val"}}}}, None),
+ ({"puppet": {"conf": {"user": {"key": "val"}}}}, None),
+ ({"puppet": {"conf": {"main": {}}}}, None),
+ (
+ {
+ "puppet": {
+ "conf": {
+ "agent": {
+ "server": "val",
+ "certname": "val",
+ }
+ }
+ }
+ },
+ None,
+ ),
+ (
+ {
+ "puppet": {
+ "conf": {
+ "main": {"key": "val"},
+ "server": {"key": "val"},
+ "agent": {"key": "val"},
+ "user": {"key": "val"},
+ "ca_cert": "val",
+ }
+ }
+ },
+ None,
+ ),
+ (
+ {
+ "puppet": {
+ "csr_attributes": {
+ "custom_attributes": {"key": "val"},
+ "extension_requests": {"key": "val"},
+ },
+ }
+ },
+ None,
+ ),
+ # Invalid package
+ (
+ {"puppet": {"install_type": "package"}},
+ r"'package' is not one of \['packages', 'aio'\]",
+ ),
+ # Additional key in "conf"
+ ({"puppet": {"conf": {"test": {}}}}, "'test' was unexpected"),
+ # Additional key in "csr_attributes"
+ (
+ {"puppet": {"csr_attributes": {"test": {}}}},
+ "'test' was unexpected",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py
index 9981dcea..44659f7d 100644
--- a/tests/unittests/config/test_cc_resizefs.py
+++ b/tests/unittests/config/test_cc_resizefs.py
@@ -3,6 +3,8 @@
import logging
from collections import namedtuple
+import pytest
+
from cloudinit.config.cc_resizefs import (
_resize_btrfs,
_resize_ext,
@@ -13,6 +15,11 @@ from cloudinit.config.cc_resizefs import (
handle,
maybe_get_writable_device_path,
)
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from cloudinit.subp import ProcessExecutionError
from tests.unittests.helpers import (
CiTestCase,
@@ -82,25 +89,6 @@ class TestResizefs(CiTestCase):
self.logs.getvalue(),
)
- @skipUnlessJsonSchema()
- def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self):
- """The handle reports json schema violations as a warning.
-
- Invalid values for resize_rootfs result in disabling the module.
- """
- cfg = {"resize_rootfs": "junk"}
- handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertIn(
- "WARNING: Invalid cloud-config provided:\nresize_rootfs: 'junk' is"
- " not one of [True, False, 'noblock']",
- logs,
- )
- self.assertIn(
- "DEBUG: Skipping module named cc_resizefs, resizing disabled\n",
- logs,
- )
-
@mock.patch("cloudinit.config.cc_resizefs.util.get_mount_info")
def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
"""handle warns when get_mount_info sees unknown filesystem for /."""
@@ -487,4 +475,24 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
self.assertEqual("gpt/system", devpth)
+class TestResizefsSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ ({"resize_rootfs": True}, None),
+ (
+ {"resize_rootfs": "wrong"},
+ r"'wrong' is not one of \[True, False, 'noblock'\]",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py
index 8896a4e8..4ae9b3f3 100644
--- a/tests/unittests/config/test_cc_resolv_conf.py
+++ b/tests/unittests/config/test_cc_resolv_conf.py
@@ -12,7 +12,16 @@ import pytest
from cloudinit import cloud, distros, helpers, util
from cloudinit.config import cc_resolv_conf
from cloudinit.config.cc_resolv_conf import generate_resolv_conf
-from tests.unittests import helpers as t_help
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ cloud_init_project_dir,
+ skipUnlessJsonSchema,
+)
from tests.unittests.util import MockDistro
LOG = logging.getLogger(__name__)
@@ -24,7 +33,7 @@ EXPECTED_HEADER = """\
#\n\n"""
-class TestResolvConf(t_help.FilesystemMockingTestCase):
+class TestResolvConf(FilesystemMockingTestCase):
with_logs = True
cfg = {"manage_resolv_conf": True, "resolv_conf": {}}
@@ -117,7 +126,7 @@ class TestResolvConf(t_help.FilesystemMockingTestCase):
class TestGenerateResolvConf:
dist = MockDistro()
- tmpl_fn = t_help.cloud_init_project_dir("templates/resolv.conf.tmpl")
+ tmpl_fn = cloud_init_project_dir("templates/resolv.conf.tmpl")
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
def test_dist_resolv_conf_fn(self, m_render_to_file):
@@ -194,4 +203,64 @@ class TestGenerateResolvConf:
] == m_write_file.call_args_list
+class TestResolvConfSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Valid
+ ({"manage_resolv_conf": False}, None),
+ ({"resolv_conf": {"options": {"any": "thing"}}}, None),
+ # Invalid
+ (
+ {"manage_resolv_conf": "asdf"},
+ "'asdf' is not of type 'boolean'",
+ ),
+            # Some common misunderstandings of the template keys
+ (
+ {"resolv_conf": {"nameserver": ["1.1.1.1"]}},
+ "Additional properties are not allowed",
+ ),
+ (
+ {"resolv_conf": {"nameservers": "1.1.1.1"}},
+ "'1.1.1.1' is not of type 'array'",
+ ),
+ (
+ {"resolv_conf": {"search": ["foo.com"]}},
+ "Additional properties are not allowed",
+ ),
+ (
+ {"resolv_conf": {"searchdomains": "foo.com"}},
+ "'foo.com' is not of type 'array'",
+ ),
+ (
+ {"resolv_conf": {"domain": ["foo.com"]}},
+ r"\['foo.com'\] is not of type 'string'",
+ ),
+ (
+ {"resolv_conf": {"sortlist": "1.2.3.4"}},
+ "'1.2.3.4' is not of type 'array'",
+ ),
+ (
+ {"resolv_conf": {"options": "timeout: 1"}},
+ "'timeout: 1' is not of type 'object'",
+ ),
+ (
+ {"resolv_conf": {"options": "rotate"}},
+ "'rotate' is not of type 'object'",
+ ),
+ (
+ {"resolv_conf": {"options": ["rotate"]}},
+ r"\['rotate'\] is not of type 'object'",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py
index fcc7db34..57313361 100644
--- a/tests/unittests/config/test_cc_rh_subscription.py
+++ b/tests/unittests/config/test_cc_rh_subscription.py
@@ -5,9 +5,16 @@
import copy
import logging
+import pytest
+
from cloudinit import subp
from cloudinit.config import cc_rh_subscription
-from tests.unittests.helpers import CiTestCase, mock
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
SUBMGR = cc_rh_subscription.SubscriptionManager
SUB_MAN_CLI = "cloudinit.config.cc_rh_subscription._sub_man_cli"
@@ -317,4 +324,35 @@ class TestBadInput(CiTestCase):
)
+class TestRhSubscriptionSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ (
+ {"rh_subscription": {"bad": "input"}},
+ "Additional properties are not allowed",
+ ),
+ (
+ {"rh_subscription": {"add-pool": [1]}},
+ "1 is not of type 'string'",
+ ),
+ (
+ {"rh_subscription": {"enable-repo": "name"}},
+ "'name' is not of type 'array'",
+ ),
+ (
+ {"rh_subscription": {"disable-repo": "name"}},
+ "'name' is not of type 'array'",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_rsyslog.py b/tests/unittests/config/test_cc_rsyslog.py
index e5d06ca2..7a48dcf8 100644
--- a/tests/unittests/config/test_cc_rsyslog.py
+++ b/tests/unittests/config/test_cc_rsyslog.py
@@ -1,8 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+import re
import shutil
import tempfile
+from functools import partial
+from typing import Optional
+
+import pytest
from cloudinit import util
from cloudinit.config.cc_rsyslog import (
@@ -14,10 +19,15 @@ from cloudinit.config.cc_rsyslog import (
parse_remotes_line,
remotes_to_rsyslog_cfg,
)
-from tests.unittests import helpers as t_help
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import TestCase, skipUnlessJsonSchema
-class TestLoadConfig(t_help.TestCase):
+class TestLoadConfig(TestCase):
def setUp(self):
super(TestLoadConfig, self).setUp()
self.basecfg = {
@@ -63,7 +73,7 @@ class TestLoadConfig(t_help.TestCase):
)
-class TestApplyChanges(t_help.TestCase):
+class TestApplyChanges(TestCase):
def setUp(self):
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
@@ -136,7 +146,7 @@ class TestApplyChanges(t_help.TestCase):
self.assertEqual(expected_content, found_content)
-class TestParseRemotesLine(t_help.TestCase):
+class TestParseRemotesLine(TestCase):
def test_valid_port(self):
r = parse_remotes_line("foo:9")
self.assertEqual(9, r.port)
@@ -164,7 +174,7 @@ class TestParseRemotesLine(t_help.TestCase):
self.assertEqual("*.* @syslog.host # foobar", str(r))
-class TestRemotesToSyslog(t_help.TestCase):
+class TestRemotesToSyslog(TestCase):
def test_simple(self):
        # str rendered line must appear in remotes_to_rsyslog_cfg return
mycfg = "*.* myhost"
@@ -195,4 +205,92 @@ class TestRemotesToSyslog(t_help.TestCase):
self.assertTrue(myline in r.splitlines())
+class TestRsyslogSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ ({"rsyslog": {"remotes": {"any": "string"}}}, None),
+ (
+ {"rsyslog": {"unknown": "a"}},
+ "Additional properties are not allowed",
+ ),
+ ({"rsyslog": {"configs": [{"filename": "a"}]}}, ""),
+ (
+ {
+ "rsyslog": {
+ "configs": [
+ {"filename": "a", "content": "a", "a": "a"}
+ ]
+ }
+ },
+ "",
+ ),
+ (
+ {"rsyslog": {"remotes": ["a"]}},
+ r"\['a'\] is not of type 'object'",
+ ),
+ ({"rsyslog": {"remotes": "a"}}, "'a' is not of type 'object"),
+ ({"rsyslog": {"service_reload_command": "a"}}, ""),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
+class TestInvalidKeyType:
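+    """load_config performs runtime type checks (raising ValueError)
+    distinct from JSON schema validation."""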
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ (
+ {"rsyslog": {"configs": 1}},
+ (
+ "Invalid type for key `configs`. Expected type(s): "
+ "<class 'list'>. Current type: <class 'int'>"
+ ),
+ ),
+ (
+ {"rsyslog": {"configs": [], "config_dir": 1}},
+ (
+ "Invalid type for key `config_dir`. Expected type(s): "
+ "<class 'str'>. Current type: <class 'int'>"
+ ),
+ ),
+ (
+ {"rsyslog": {"configs": [], "config_filename": True}},
+ (
+ "Invalid type for key `config_filename`. Expected type(s):"
+ " <class 'str'>. Current type: <class 'bool'>"
+ ),
+ ),
+ (
+ {"rsyslog": {"service_reload_command": 3.14}},
+ (
+ "Invalid type for key `service_reload_command`. "
+ "Expected type(s): (<class 'str'>, <class 'list'>). "
+ "Current type: <class 'float'>"
+ ),
+ ),
+ (
+ {"rsyslog": {"remotes": ["1", 2, 3.14]}},
+ (
+ "Invalid type for key `remotes`. Expected type(s): "
+ "<class 'dict'>. Current type: <class 'list'>"
+ ),
+ ),
+ ],
+ )
+ def test_invalid_key_types(self, config: dict, error_msg: Optional[str]):
+ callable_ = partial(load_config, config)
+ if error_msg is None:
+ callable_()
+ else:
+ with pytest.raises(ValueError, match=re.escape(error_msg)):
+ callable_()
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_runcmd.py b/tests/unittests/config/test_cc_runcmd.py
index 59490d67..ab5733a7 100644
--- a/tests/unittests/config/test_cc_runcmd.py
+++ b/tests/unittests/config/test_cc_runcmd.py
@@ -4,12 +4,17 @@ import os
import stat
from unittest.mock import patch
+import pytest
+
from cloudinit import helpers, subp, util
-from cloudinit.config.cc_runcmd import handle, schema
+from cloudinit.config.cc_runcmd import handle
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests.helpers import (
- CiTestCase,
FilesystemMockingTestCase,
- SchemaTestCaseMixin,
skipUnlessJsonSchema,
)
from tests.unittests.util import get_cloud
@@ -61,45 +66,6 @@ class TestRuncmd(FilesystemMockingTestCase):
str(cm.exception),
)
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_type(self):
- """Schema validation warns of non-array type for runcmd key.
-
- Schema validation is not strict, so runcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {"runcmd": 1}
- cc = get_cloud(paths=self.paths)
- with self.assertRaises(TypeError) as cm:
- handle("cc_runcmd", invalid_config, cc, LOG, [])
- self.assertIn(
- "Invalid cloud-config provided:\nruncmd: 1 is not of type 'array'",
- self.logs.getvalue(),
- )
- self.assertIn("Failed to shellify", str(cm.exception))
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_item_type(self):
- """Schema validation warns of non-array or string runcmd items.
-
- Schema validation is not strict, so runcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {
- "runcmd": ["ls /", 20, ["wget", "http://stuff/blah"], {"a": "n"}]
- }
- cc = get_cloud(paths=self.paths)
- with self.assertRaises(TypeError) as cm:
- handle("cc_runcmd", invalid_config, cc, LOG, [])
- expected_warnings = [
- "runcmd.1: 20 is not valid under any of the given schemas",
- "runcmd.3: {'a': 'n'} is not valid under any of the given schema",
- ]
- logs = self.logs.getvalue()
- for warning in expected_warnings:
- self.assertIn(warning, logs)
- self.assertIn("Failed to shellify", str(cm.exception))
-
def test_handler_write_valid_runcmd_schema_to_file(self):
"""Valid runcmd schema is written to a runcmd shell script."""
valid_config = {"runcmd": [["ls", "/"]]}
@@ -115,23 +81,36 @@ class TestRuncmd(FilesystemMockingTestCase):
@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
- """Directly test schema rather than through handle."""
-
- schema = schema
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- [["echo", "bye"], ["echo", "bye"]],
- "command entries can be duplicate.",
- )
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- ["echo bye", "echo bye"], "command entries can be duplicate."
- )
+class TestRunCmdSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Ensure duplicate commands are valid
+ ({"runcmd": [["echo", "bye"], ["echo", "bye"]]}, None),
+ ({"runcmd": ["echo bye", "echo bye"]}, None),
+ # Invalid schemas
+ ({"runcmd": 1}, "1 is not of type 'array'"),
+ ({"runcmd": []}, r"runcmd: \[\] is too short"),
+ (
+ {
+ "runcmd": [
+ "ls /",
+ 20,
+ ["wget", "http://stuff/blah"],
+ {"a": "n"},
+ ]
+ },
+ "",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_scripts_vendor.py b/tests/unittests/config/test_cc_scripts_vendor.py
new file mode 100644
index 00000000..a8cbfb4f
--- /dev/null
+++ b/tests/unittests/config/test_cc_scripts_vendor.py
@@ -0,0 +1,28 @@
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestScriptsVendorSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ ({"vendor_data": {"enabled": True}}, None),
+ ({"vendor_data": {"enabled": "yes"}}, None),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py
index 8b2fdcdd..0f43d858 100644
--- a/tests/unittests/config/test_cc_seed_random.py
+++ b/tests/unittests/config/test_cc_seed_random.py
@@ -12,15 +12,22 @@ import logging
import tempfile
from io import BytesIO
+import pytest
+
from cloudinit import subp, util
from cloudinit.config import cc_seed_random
-from tests.unittests import helpers as t_help
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import TestCase, skipUnlessJsonSchema
from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
-class TestRandomSeed(t_help.TestCase):
+class TestRandomSeed(TestCase):
def setUp(self):
super(TestRandomSeed, self).setUp()
self._seed_file = tempfile.mktemp()
@@ -218,4 +225,36 @@ def apply_patches(patches):
return ret
+class TestSeedRandomSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ (
+ {"random_seed": {"encoding": "bad"}},
+ "'bad' is not one of "
+ r"\['raw', 'base64', 'b64', 'gzip', 'gz'\]",
+ ),
+ (
+ {"random_seed": {"command": "foo"}},
+ "'foo' is not of type 'array'",
+ ),
+ (
+ {"random_seed": {"command_required": "true"}},
+ "'true' is not of type 'boolean'",
+ ),
+ (
+ {"random_seed": {"bad": "key"}},
+ "Additional properties are not allowed",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
index bc81214b..ac7abadb 100644
--- a/tests/unittests/config/test_cc_set_passwords.py
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -1,77 +1,262 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
from unittest import mock
-from cloudinit import util
+import pytest
+
+from cloudinit import subp, util
from cloudinit.config import cc_set_passwords as setpass
-from tests.unittests.helpers import CiTestCase
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import CiTestCase, skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
MODPATH = "cloudinit.config.cc_set_passwords."
-class TestHandleSshPwauth(CiTestCase):
- """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
+@pytest.fixture()
+def mock_uses_systemd(mocker):
+ mocker.patch("cloudinit.distros.uses_systemd", return_value=True)
- with_logs = True
+class TestHandleSSHPwauth:
+ @pytest.mark.parametrize(
+ "uses_systemd,cmd",
+ (
+ (True, ["systemctl", "status", "ssh"]),
+ (False, ["service", "ssh", "status"]),
+ ),
+ )
@mock.patch("cloudinit.distros.subp.subp")
- def test_unknown_value_logs_warning(self, m_subp):
- cloud = self.tmp_cloud(distro="ubuntu")
- setpass.handle_ssh_pwauth("floo", cloud.distro)
- self.assertIn(
- "Unrecognized value: ssh_pwauth=floo", self.logs.getvalue()
- )
- m_subp.assert_not_called()
+ def test_unknown_value_logs_warning(
+ self, m_subp, uses_systemd, cmd, caplog
+ ):
+ cloud = get_cloud("ubuntu")
+ with mock.patch.object(
+ cloud.distro, "uses_systemd", return_value=uses_systemd
+ ):
+ setpass.handle_ssh_pwauth("floo", cloud.distro)
+ assert "Unrecognized value: ssh_pwauth=floo" in caplog.text
+ assert [mock.call(cmd, capture=True)] == m_subp.call_args_list
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @pytest.mark.parametrize(
+ "uses_systemd,ssh_updated,cmd,expected_log",
+ (
+ (
+ True,
+ True,
+ ["systemctl", "restart", "ssh"],
+ "Restarted the SSH daemon.",
+ ),
+ (
+ True,
+ False,
+ ["systemctl", "status", "ssh"],
+ "No need to restart SSH",
+ ),
+ (
+ False,
+ True,
+ ["service", "ssh", "restart"],
+ "Restarted the SSH daemon.",
+ ),
+ (
+ False,
+ False,
+ ["service", "ssh", "status"],
+ "No need to restart SSH",
+ ),
+ ),
+ )
+ @mock.patch(MODPATH + "update_ssh_config")
@mock.patch("cloudinit.distros.subp.subp")
- def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
- """If systemctl in service cmd: systemctl restart name."""
- cloud = self.tmp_cloud(distro="ubuntu")
- cloud.distro.init_cmd = ["systemctl"]
- setpass.handle_ssh_pwauth(True, cloud.distro)
- m_subp.assert_called_with(
- ["systemctl", "restart", "ssh"], capture=True
+ def test_restart_ssh_only_when_changes_made_and_ssh_installed(
+ self,
+ m_subp,
+ update_ssh_config,
+ uses_systemd,
+ ssh_updated,
+ cmd,
+ expected_log,
+ caplog,
+ ):
+ update_ssh_config.return_value = ssh_updated
+ cloud = get_cloud("ubuntu")
+ with mock.patch.object(
+ cloud.distro, "uses_systemd", return_value=uses_systemd
+ ):
+ setpass.handle_ssh_pwauth(True, cloud.distro)
+ if ssh_updated:
+ m_subp.assert_called_with(cmd, capture=True)
+ else:
+ assert [mock.call(cmd, capture=True)] == m_subp.call_args_list
+ assert expected_log in "\n".join(
+ r.msg for r in caplog.records if r.levelname == "DEBUG"
)
- @mock.patch(MODPATH + "update_ssh_config", return_value=False)
- @mock.patch("cloudinit.distros.subp.subp")
- def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
- """If config is not updated, then no system restart should be done."""
- cloud = self.tmp_cloud(distro="ubuntu")
- setpass.handle_ssh_pwauth(True, cloud.distro)
- m_subp.assert_not_called()
- self.assertIn("No need to restart SSH", self.logs.getvalue())
-
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
@mock.patch("cloudinit.distros.subp.subp")
- def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
+ def test_unchanged_value_does_nothing(
+ self, m_subp, update_ssh_config, mock_uses_systemd
+ ):
"""If 'unchanged', then no updates to config and no restart."""
- cloud = self.tmp_cloud(distro="ubuntu")
+ update_ssh_config.assert_not_called()
+ cloud = get_cloud("ubuntu")
setpass.handle_ssh_pwauth("unchanged", cloud.distro)
- m_update_ssh_config.assert_not_called()
- m_subp.assert_not_called()
+ assert [
+ mock.call(["systemctl", "status", "ssh"], capture=True)
+ ] == m_subp.call_args_list
+ @pytest.mark.allow_subp_for("systemctl")
@mock.patch("cloudinit.distros.subp.subp")
- def test_valid_change_values(self, m_subp):
- """If value is a valid changen value, then update should be called."""
- cloud = self.tmp_cloud(distro="ubuntu")
+ def test_valid_value_changes_updates_ssh(self, m_subp, mock_uses_systemd):
+ """If value is a valid changed value, then update will be called."""
+ cloud = get_cloud("ubuntu")
upname = MODPATH + "update_ssh_config"
optname = "PasswordAuthentication"
- for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
+ for n, value in enumerate(util.FALSE_STRINGS + util.TRUE_STRINGS, 1):
optval = "yes" if value in util.TRUE_STRINGS else "no"
with mock.patch(upname, return_value=False) as m_update:
setpass.handle_ssh_pwauth(value, cloud.distro)
- m_update.assert_called_with({optname: optval})
- m_subp.assert_not_called()
+ assert (
+ mock.call({optname: optval}) == m_update.call_args_list[-1]
+ )
+ assert m_subp.call_count == n
+ @pytest.mark.parametrize(
+ [
+ "uses_systemd",
+ "raised_error",
+ "warning_log",
+ "debug_logs",
+ "update_ssh_call_count",
+ ],
+ (
+ (
+ True,
+ subp.ProcessExecutionError(
+ stderr="Service is not running.", exit_code=3
+ ),
+ None,
+ [
+ "Writing config 'ssh_pwauth: True'. SSH service"
+ " 'ssh' will not be restarted because it is stopped.",
+ "Not restarting SSH service: service is stopped.",
+ ],
+ 1,
+ ),
+ (
+ True,
+ subp.ProcessExecutionError(
+ stderr="Service is not installed.", exit_code=4
+ ),
+ "Ignoring config 'ssh_pwauth: True'. SSH service 'ssh' is"
+ " not installed.",
+ [],
+ 0,
+ ),
+ (
+ True,
+ subp.ProcessExecutionError(
+ stderr="Service is not available.", exit_code=2
+ ),
+ "Ignoring config 'ssh_pwauth: True'. SSH service 'ssh'"
+ " is not available. Error: ",
+ [],
+ 0,
+ ),
+ (
+ False,
+ subp.ProcessExecutionError(
+ stderr="Service is not available.", exit_code=25
+ ),
+ None,
+ [
+ "Writing config 'ssh_pwauth: True'. SSH service"
+ " 'ssh' will not be restarted because it is not running"
+ " or not available.",
+ "Not restarting SSH service: service is stopped.",
+ ],
+ 1,
+ ),
+ (
+ False,
+ subp.ProcessExecutionError(
+ stderr="Service is not available.", exit_code=3
+ ),
+ None,
+ [
+ "Writing config 'ssh_pwauth: True'. SSH service"
+ " 'ssh' will not be restarted because it is not running"
+ " or not available.",
+ "Not restarting SSH service: service is stopped.",
+ ],
+ 1,
+ ),
+ (
+ False,
+ subp.ProcessExecutionError(
+ stderr="Service is not available.", exit_code=4
+ ),
+ None,
+ [
+ "Writing config 'ssh_pwauth: True'. SSH service"
+ " 'ssh' will not be restarted because it is not running"
+ " or not available.",
+ "Not restarting SSH service: service is stopped.",
+ ],
+ 1,
+ ),
+ ),
+ )
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_no_restart_when_service_is_not_running(
+ self,
+ m_subp,
+ m_update_ssh_config,
+ uses_systemd,
+ raised_error,
+ warning_log,
+ debug_logs,
+ update_ssh_call_count,
+ caplog,
+ ):
+ """Write config but don't restart SSH service when not running."""
+ cloud = get_cloud("ubuntu")
+ cloud.distro.manage_service = mock.Mock(side_effect=raised_error)
+ cloud.distro.uses_systemd = mock.Mock(return_value=uses_systemd)
+ setpass.handle_ssh_pwauth(True, cloud.distro)
+ logs_by_level = {logging.WARNING: [], logging.DEBUG: []}
+ for _, level, msg in caplog.record_tuples:
+ logs_by_level[level].append(msg)
+ if warning_log:
+ assert warning_log in "\n".join(
+ logs_by_level[logging.WARNING]
+ ), logs_by_level
+ for debug_log in debug_logs:
+ assert debug_log in logs_by_level[logging.DEBUG]
+ assert [
+ mock.call("status", "ssh")
+ ] == cloud.distro.manage_service.call_args_list
+ assert m_update_ssh_config.call_count == update_ssh_call_count
+ assert m_subp.call_count == 0
+ assert cloud.distro.uses_systemd.call_count == 1
+
+
+@pytest.mark.usefixtures("mock_uses_systemd")
class TestSetPasswordsHandle(CiTestCase):
"""Test cc_set_passwords.handle"""
with_logs = True
- def test_handle_on_empty_config(self, *args):
+ @mock.patch(MODPATH + "subp.subp")
+ def test_handle_on_empty_config(self, m_subp):
"""handle logs that no password has changed when config is empty."""
cloud = self.tmp_cloud(distro="ubuntu")
setpass.handle(
@@ -82,8 +267,13 @@ class TestSetPasswordsHandle(CiTestCase):
"ssh_pwauth=None\n",
self.logs.getvalue(),
)
+ self.assertEqual(
+ [mock.call(["systemctl", "status", "ssh"], capture=True)],
+ m_subp.call_args_list,
+ )
- def test_handle_on_chpasswd_list_parses_common_hashes(self):
+ @mock.patch(MODPATH + "subp.subp")
+ def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
"""handle parses command password hashes."""
cloud = self.tmp_cloud(distro="ubuntu")
valid_hashed_pwds = [
@@ -108,19 +298,21 @@ class TestSetPasswordsHandle(CiTestCase):
called = chpasswd.call_args[0][1]
self.assertEqual(valid, called)
- @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "util.is_BSD", return_value=True)
@mock.patch(MODPATH + "subp.subp")
def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
self, m_subp, m_is_bsd
):
"""BSD don't use chpasswd"""
- m_is_bsd.return_value = True
- cloud = self.tmp_cloud(distro="freebsd")
+ cloud = get_cloud(distro="freebsd")
valid_pwds = ["ubuntu:passw0rd"]
cfg = {"chpasswd": {"list": valid_pwds}}
- setpass.handle(
- "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
- )
+ with mock.patch.object(
+ cloud.distro, "uses_systemd", return_value=False
+ ):
+ setpass.handle(
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
self.assertEqual(
[
mock.call(
@@ -129,6 +321,7 @@ class TestSetPasswordsHandle(CiTestCase):
logstring="chpasswd for ubuntu",
),
mock.call(["pw", "usermod", "ubuntu", "-p", "01-Jan-1970"]),
+ mock.call(["service", "sshd", "status"], capture=True),
],
m_subp.call_args_list,
)
@@ -174,4 +367,35 @@ class TestSetPasswordsHandle(CiTestCase):
self.fail("Password not emitted to console")
+class TestSetPasswordsSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Test both formats still work
+ ({"ssh_pwauth": True}, None),
+ ({"ssh_pwauth": "yes"}, None),
+ ({"ssh_pwauth": "unchanged"}, None),
+ ({"chpasswd": {"list": "blah"}}, None),
+ # Test regex
+ ({"chpasswd": {"list": ["user:pass"]}}, None),
+ # Test valid
+ ({"password": "pass"}, None),
+ # Test invalid values
+ (
+ {"chpasswd": {"expire": "yes"}},
+ "'yes' is not of type 'boolean'",
+ ),
+ ({"chpasswd": {"list": ["user"]}}, ""),
+ ({"chpasswd": {"list": []}}, r"\[\] is too short"),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
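The rewritten set_passwords tests above swap CiTestCase's self.logs buffer for pytest's built-in caplog fixture. A minimal standalone sketch of that idiom (the logger name and messages here are illustrative, not part of the patch):

import logging

LOG = logging.getLogger("example")


def do_work():
    LOG.warning("Unrecognized value: ssh_pwauth=floo")
    LOG.debug("No need to restart SSH")


def test_log_capture(caplog):
    caplog.set_level(logging.DEBUG)
    do_work()
    # Plain substring check over everything captured, as in the tests above.
    assert "Unrecognized value: ssh_pwauth=floo" in caplog.text
    # record_tuples yields (logger_name, level, message) for level filtering.
    debug_msgs = [
        msg for _, level, msg in caplog.record_tuples
        if level == logging.DEBUG
    ]
    assert "No need to restart SSH" in debug_msgs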
diff --git a/tests/unittests/config/test_cc_snap.py b/tests/unittests/config/test_cc_snap.py
index 1632676d..855c23fc 100644
--- a/tests/unittests/config/test_cc_snap.py
+++ b/tests/unittests/config/test_cc_snap.py
@@ -3,19 +3,22 @@
import re
from io import StringIO
+import pytest
+
from cloudinit import util
from cloudinit.config.cc_snap import (
ASSERTIONS_FILE,
add_assertions,
handle,
- maybe_install_squashfuse,
run_commands,
- schema,
)
-from cloudinit.config.schema import validate_cloudconfig_schema
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests.helpers import (
CiTestCase,
- SchemaTestCaseMixin,
mock,
skipUnlessJsonSchema,
wrap_and_call,
@@ -275,184 +278,65 @@ class TestRunCommands(CiTestCase):
@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
-
- with_logs = True
- schema = schema
-
- def test_schema_warns_on_snap_not_as_dict(self):
- """If the snap configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({"snap": "wrong type"}, schema)
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nsnap: 'wrong type'"
- " is not of type 'object'\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- def test_schema_disallows_unknown_keys(self, _):
- """Unknown keys in the snap configuration emit warnings."""
- validate_cloudconfig_schema(
- {"snap": {"commands": ["ls"], "invalid-key": ""}}, schema
- )
- self.assertIn(
- "WARNING: Invalid cloud-config provided:\nsnap: Additional"
- " properties are not allowed ('invalid-key' was unexpected)",
- self.logs.getvalue(),
- )
-
- def test_warn_schema_requires_either_commands_or_assertions(self):
- """Warn when snap configuration lacks both commands and assertions."""
- validate_cloudconfig_schema({"snap": {}}, schema)
- self.assertIn(
- "WARNING: Invalid cloud-config provided:\nsnap: {} does not"
- " have enough properties",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- def test_warn_schema_commands_is_not_list_or_dict(self, _):
- """Warn when snap:commands config is not a list or dict."""
- validate_cloudconfig_schema({"snap": {"commands": "broken"}}, schema)
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nsnap.commands: 'broken'"
- " is not of type 'object', 'array'\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- def test_warn_schema_when_commands_is_empty(self, _):
- """Emit warnings when snap:commands is an empty list or dict."""
- validate_cloudconfig_schema({"snap": {"commands": []}}, schema)
- validate_cloudconfig_schema({"snap": {"commands": {}}}, schema)
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nsnap.commands: [] is"
- " too short\nWARNING: Invalid cloud-config provided:\n"
- "snap.commands: {} does not have enough properties\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- def test_schema_when_commands_are_list_or_dict(self, _):
- """No warnings when snap:commands are either a list or dict."""
- validate_cloudconfig_schema({"snap": {"commands": ["valid"]}}, schema)
- validate_cloudconfig_schema(
- {"snap": {"commands": {"01": "also valid"}}}, schema
- )
- self.assertEqual("", self.logs.getvalue())
-
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- def test_schema_when_commands_values_are_invalid_type(self, _):
- """Warnings when snap:commands values are invalid type (e.g. int)"""
- validate_cloudconfig_schema({"snap": {"commands": [123]}}, schema)
- validate_cloudconfig_schema(
- {"snap": {"commands": {"01": 123}}}, schema
- )
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\n"
- "snap.commands.0: 123 is not valid under any of the given"
- " schemas\n"
- "WARNING: Invalid cloud-config provided:\n"
- "snap.commands.01: 123 is not valid under any of the given"
- " schemas\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- def test_schema_when_commands_list_values_are_invalid_type(self, _):
- """Warnings when snap:commands list values are wrong type (e.g. int)"""
- validate_cloudconfig_schema(
- {"snap": {"commands": [["snap", "install", 123]]}}, schema
- )
- validate_cloudconfig_schema(
- {"snap": {"commands": {"01": ["snap", "install", 123]}}}, schema
- )
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\n"
- "snap.commands.0: ['snap', 'install', 123] is not valid under any"
- " of the given schemas\n",
- "WARNING: Invalid cloud-config provided:\n"
- "snap.commands.0: ['snap', 'install', 123] is not valid under any"
- " of the given schemas\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- def test_schema_when_assertions_values_are_invalid_type(self, _):
- """Warnings when snap:assertions values are invalid type (e.g. int)"""
- validate_cloudconfig_schema({"snap": {"assertions": [123]}}, schema)
- validate_cloudconfig_schema(
- {"snap": {"assertions": {"01": 123}}}, schema
- )
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\n"
- "snap.assertions.0: 123 is not of type 'string'\n"
- "WARNING: Invalid cloud-config provided:\n"
- "snap.assertions.01: 123 is not of type 'string'\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.add_assertions")
- def test_warn_schema_assertions_is_not_list_or_dict(self, _):
- """Warn when snap:assertions config is not a list or dict."""
- validate_cloudconfig_schema({"snap": {"assertions": "broken"}}, schema)
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nsnap.assertions:"
- " 'broken' is not of type 'object', 'array'\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.add_assertions")
- def test_warn_schema_when_assertions_is_empty(self, _):
- """Emit warnings when snap:assertions is an empty list or dict."""
- validate_cloudconfig_schema({"snap": {"assertions": []}}, schema)
- validate_cloudconfig_schema({"snap": {"assertions": {}}}, schema)
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nsnap.assertions: []"
- " is too short\n"
- "WARNING: Invalid cloud-config provided:\nsnap.assertions: {}"
- " does not have enough properties\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.config.cc_snap.add_assertions")
- def test_schema_when_assertions_are_list_or_dict(self, _):
- """No warnings when snap:assertions are a list or dict."""
- validate_cloudconfig_schema(
- {"snap": {"assertions": ["valid"]}}, schema
- )
- validate_cloudconfig_schema(
- {"snap": {"assertions": {"01": "also valid"}}}, schema
- )
- self.assertEqual("", self.logs.getvalue())
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- {"commands": [["echo", "bye"], ["echo", "bye"]]},
- "command entries can be duplicate.",
- )
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- {"commands": ["echo bye", "echo bye"]},
- "command entries can be duplicate.",
- )
-
- def test_duplicates_are_fine_dict_array(self):
- """Duplicated commands dict/array entries are allowed."""
- self.assertSchemaValid(
- {"commands": {"00": ["echo", "bye"], "01": ["echo", "bye"]}},
- "command entries can be duplicate.",
- )
-
- def test_duplicates_are_fine_dict_string(self):
- """Duplicated commands dict/string entries are allowed."""
- self.assertSchemaValid(
- {"commands": {"00": "echo bye", "01": "echo bye"}},
- "command entries can be duplicate.",
- )
+class TestSnapSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Valid
+ ({"snap": {"commands": ["valid"]}}, None),
+ ({"snap": {"commands": {"01": "also valid"}}}, None),
+ ({"snap": {"assertions": ["valid"]}}, None),
+ ({"snap": {"assertions": {"01": "also valid"}}}, None),
+ ({"commands": [["echo", "bye"], ["echo", "bye"]]}, None),
+ ({"commands": ["echo bye", "echo bye"]}, None),
+ (
+ {"commands": {"00": ["echo", "bye"], "01": ["echo", "bye"]}},
+ None,
+ ),
+ ({"commands": {"00": "echo bye", "01": "echo bye"}}, None),
+ # Invalid
+ ({"snap": "wrong type"}, "'wrong type' is not of type 'object'"),
+ (
+ {"snap": {"commands": ["ls"], "invalid-key": ""}},
+ "Additional properties are not allowed",
+ ),
+ ({"snap": {}}, "{} does not have enough properties"),
+ (
+ {"snap": {"commands": "broken"}},
+ "'broken' is not of type 'object', 'array'",
+ ),
+ ({"snap": {"commands": []}}, r"snap.commands: \[\] is too short"),
+ (
+ {"snap": {"commands": {}}},
+ r"snap.commands: {} does not have enough properties",
+ ),
+ ({"snap": {"commands": [123]}}, ""),
+ ({"snap": {"commands": {"01": 123}}}, ""),
+ ({"snap": {"commands": [["snap", "install", 123]]}}, ""),
+ ({"snap": {"commands": {"01": ["snap", "install", 123]}}}, ""),
+ ({"snap": {"assertions": [123]}}, "123 is not of type 'string'"),
+ (
+ {"snap": {"assertions": {"01": 123}}},
+ "123 is not of type 'string'",
+ ),
+ (
+ {"snap": {"assertions": "broken"}},
+ "'broken' is not of type 'object', 'array'",
+ ),
+ ({"snap": {"assertions": []}}, r"\[\] is too short"),
+ (
+ {"snap": {"assertions": {}}},
+ r"\{} does not have enough properties",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
class TestHandle(CiTestCase):
@@ -463,77 +347,6 @@ class TestHandle(CiTestCase):
super(TestHandle, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- @mock.patch("cloudinit.config.cc_snap.add_assertions")
- @mock.patch("cloudinit.config.cc_snap.validate_cloudconfig_schema")
- def test_handle_no_config(self, m_schema, m_add, m_run):
- """When no snap-related configuration is provided, nothing happens."""
- cfg = {}
- handle("snap", cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertIn(
- "DEBUG: Skipping module named snap, no 'snap' key in config",
- self.logs.getvalue(),
- )
- m_schema.assert_not_called()
- m_add.assert_not_called()
- m_run.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_snap.run_commands")
- @mock.patch("cloudinit.config.cc_snap.add_assertions")
- @mock.patch("cloudinit.config.cc_snap.maybe_install_squashfuse")
- def test_handle_skips_squashfuse_when_unconfigured(
- self, m_squash, m_add, m_run
- ):
- """When squashfuse_in_container is unset, don't attempt to install."""
- handle(
- "snap", cfg={"snap": {}}, cloud=None, log=self.logger, args=None
- )
- handle(
- "snap",
- cfg={"snap": {"squashfuse_in_container": None}},
- cloud=None,
- log=self.logger,
- args=None,
- )
- handle(
- "snap",
- cfg={"snap": {"squashfuse_in_container": False}},
- cloud=None,
- log=self.logger,
- args=None,
- )
- self.assertEqual([], m_squash.call_args_list) # No calls
- # snap configuration missing assertions and commands will default to []
- self.assertIn(mock.call([]), m_add.call_args_list)
- self.assertIn(mock.call([]), m_run.call_args_list)
-
- @mock.patch("cloudinit.config.cc_snap.maybe_install_squashfuse")
- def test_handle_tries_to_install_squashfuse(self, m_squash):
- """If squashfuse_in_container is True, try installing squashfuse."""
- cfg = {"snap": {"squashfuse_in_container": True}}
- mycloud = FakeCloud(None)
- handle("snap", cfg=cfg, cloud=mycloud, log=self.logger, args=None)
- self.assertEqual([mock.call(mycloud)], m_squash.call_args_list)
-
- def test_handle_runs_commands_provided(self):
- """If commands are specified as a list, run them."""
- outfile = self.tmp_path("output.log", dir=self.tmp)
-
- cfg = {
- "snap": {
- "commands": [
- 'echo "HI" >> %s' % outfile,
- 'echo "MOM" >> %s' % outfile,
- ]
- }
- }
- mock_path = "cloudinit.config.cc_snap.sys.stderr"
- with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
- with mock.patch(mock_path, new_callable=StringIO):
- handle("snap", cfg=cfg, cloud=None, log=self.logger, args=None)
-
- self.assertEqual("HI\nMOM\n", util.load_file(outfile))
-
@mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_handle_adds_assertions(self, m_subp):
"""Any configured snap assertions are provided to add_assertions."""
@@ -558,83 +371,5 @@ class TestHandle(CiTestCase):
util.load_file(compare_file), util.load_file(assert_file)
)
- @mock.patch("cloudinit.config.cc_snap.subp.subp")
- @skipUnlessJsonSchema()
- def test_handle_validates_schema(self, m_subp):
- """Any provided configuration is runs validate_cloudconfig_schema."""
- assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
- cfg = {"snap": {"invalid": ""}} # Generates schema warning
- wrap_and_call(
- "cloudinit.config.cc_snap",
- {"ASSERTIONS_FILE": {"new": assert_file}},
- handle,
- "snap",
- cfg=cfg,
- cloud=None,
- log=self.logger,
- args=None,
- )
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nsnap: Additional"
- " properties are not allowed ('invalid' was unexpected)\n",
- self.logs.getvalue(),
- )
-
-
-class TestMaybeInstallSquashFuse(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestMaybeInstallSquashFuse, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch("cloudinit.config.cc_snap.util.is_container")
- def test_maybe_install_squashfuse_skips_non_containers(self, m_container):
- """maybe_install_squashfuse does nothing when not on a container."""
- m_container.return_value = False
- maybe_install_squashfuse(cloud=FakeCloud(None))
- self.assertEqual([mock.call()], m_container.call_args_list)
- self.assertEqual("", self.logs.getvalue())
-
- @mock.patch("cloudinit.config.cc_snap.util.is_container")
- def test_maybe_install_squashfuse_raises_install_errors(self, m_container):
- """maybe_install_squashfuse logs and raises package install errors."""
- m_container.return_value = True
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- "Some apt error"
- )
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual("Some apt error", str(context_manager.exception))
- self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
-
- @mock.patch("cloudinit.config.cc_snap.util.is_container")
- def test_maybe_install_squashfuse_raises_update_errors(self, m_container):
- """maybe_install_squashfuse logs and raises package update errors."""
- m_container.return_value = True
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- "Some apt error"
- )
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual("Some apt error", str(context_manager.exception))
- self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
-
- @mock.patch("cloudinit.config.cc_snap.util.is_container")
- def test_maybe_install_squashfuse_happy_path(self, m_container):
- """maybe_install_squashfuse logs and raises package install errors."""
- m_container.return_value = True
- distro = mock.MagicMock() # No errors raised
- maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual(
- [mock.call()], distro.update_package_sources.call_args_list
- )
- self.assertEqual(
- [mock.call(["squashfuse"])], distro.install_packages.call_args_list
- )
-
# vi: ts=4 expandtab
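The dozen unittest-style snap schema tests above collapse into one table-driven test. A self-contained sketch of that harness shape, with a toy validator standing in for validate_cloudconfig_schema(config, get_schema(), strict=True) so the snippet runs without cloud-init installed:

import pytest


class SchemaValidationError(ValueError):
    """Stand-in for cloudinit.config.schema.SchemaValidationError."""


def validate(config: dict) -> None:
    # Toy rule standing in for strict JSON-schema validation.
    commands = config.get("snap", {}).get("commands", ["noop"])
    if not isinstance(commands, (list, dict)):
        raise SchemaValidationError(
            f"snap.commands: {commands!r} is not of type 'object', 'array'"
        )
    if len(commands) == 0:
        raise SchemaValidationError("snap.commands: [] is too short")


@pytest.mark.parametrize(
    "config, error_msg",
    [
        ({"snap": {"commands": ["valid"]}}, None),
        ({"snap": {"commands": "broken"}}, "is not of type"),
        ({"snap": {"commands": []}}, r"\[\] is too short"),
    ],
)
def test_schema_validation(config, error_msg):
    if error_msg is None:
        validate(config)
    else:
        with pytest.raises(SchemaValidationError, match=error_msg):
            validate(config)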
diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py
index d66cc4cb..47c0c777 100644
--- a/tests/unittests/config/test_cc_ssh.py
+++ b/tests/unittests/config/test_cc_ssh.py
@@ -2,10 +2,20 @@
import logging
import os.path
+from typing import Optional
+from unittest import mock
+
+import pytest
from cloudinit import ssh_util
from cloudinit.config import cc_ssh
-from tests.unittests.helpers import CiTestCase, mock
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -15,80 +25,80 @@ KEY_NAMES_NO_DSA = [
]
-@mock.patch(MODPATH + "ssh_util.setup_user_keys")
-class TestHandleSsh(CiTestCase):
- """Test cc_ssh handling of ssh config."""
-
- def _publish_hostkey_test_setup(self):
- self.test_hostkeys = {
- "dsa": ("ssh-dss", "AAAAB3NzaC1kc3MAAACB"),
- "ecdsa": ("ecdsa-sha2-nistp256", "AAAAE2VjZ"),
- "ed25519": ("ssh-ed25519", "AAAAC3NzaC1lZDI"),
- "rsa": ("ssh-rsa", "AAAAB3NzaC1yc2EAAA"),
- }
- self.test_hostkey_files = []
- hostkey_tmpdir = self.tmp_dir()
- for key_type in cc_ssh.GENERATE_KEY_NAMES:
- key_data = self.test_hostkeys[key_type]
- filename = "ssh_host_%s_key.pub" % key_type
- filepath = os.path.join(hostkey_tmpdir, filename)
- self.test_hostkey_files.append(filepath)
- with open(filepath, "w") as f:
- f.write(" ".join(key_data))
-
- cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, "ssh_host_%s_key")
-
- def test_apply_credentials_with_user(self, m_setup_keys):
- """Apply keys for the given user and root."""
- keys = ["key1"]
- user = "clouduser"
- cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
- self.assertEqual(
- [
- mock.call(set(keys), user),
- mock.call(set(keys), "root", options=""),
- ],
- m_setup_keys.call_args_list,
- )
+@pytest.fixture(scope="function")
+def publish_hostkey_test_setup(tmpdir):
+ test_hostkeys = {
+ "dsa": ("ssh-dss", "AAAAB3NzaC1kc3MAAACB"),
+ "ecdsa": ("ecdsa-sha2-nistp256", "AAAAE2VjZ"),
+ "ed25519": ("ssh-ed25519", "AAAAC3NzaC1lZDI"),
+ "rsa": ("ssh-rsa", "AAAAB3NzaC1yc2EAAA"),
+ }
+ test_hostkey_files = []
+ hostkey_tmpdir = tmpdir
+ for key_type in cc_ssh.GENERATE_KEY_NAMES:
+ filename = "ssh_host_%s_key.pub" % key_type
+ filepath = os.path.join(hostkey_tmpdir, filename)
+ test_hostkey_files.append(filepath)
+ with open(filepath, "w") as f:
+ f.write(" ".join(test_hostkeys[key_type]))
+
+ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, "ssh_host_%s_key")
+ yield test_hostkeys, test_hostkey_files
+
+
+def _replace_options(user: Optional[str] = None) -> str:
+ options = ssh_util.DISABLE_USER_OPTS
+ if user:
+ new_user = user
+ else:
+ new_user = "NONE"
+ options = options.replace("$USER", new_user)
+ options = options.replace("$DISABLE_USER", "root")
+ return options
- def test_apply_credentials_with_no_user(self, m_setup_keys):
- """Apply keys for root only."""
- keys = ["key1"]
- user = None
- cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
- self.assertEqual(
- [mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list,
- )
- def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
- """Apply keys for the given user and disable root ssh."""
- keys = ["key1"]
- user = "clouduser"
- options = ssh_util.DISABLE_USER_OPTS
- cc_ssh.apply_credentials(keys, user, True, options)
- options = options.replace("$USER", user)
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual(
- [
- mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options),
- ],
- m_setup_keys.call_args_list,
- )
+@mock.patch(MODPATH + "ssh_util.setup_user_keys")
+class TestHandleSsh:
+ """Test cc_ssh handling of ssh config."""
- def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
- """Apply keys no user and disable root ssh."""
- keys = ["key1"]
- user = None
+ @pytest.mark.parametrize(
+ "keys,user,disable_root_opts",
+ [
+ # For the given user and root.
+ pytest.param(["key1"], "clouduser", False, id="with_user"),
+ # For root only.
+ pytest.param(["key1"], None, False, id="with_no_user"),
+ # For the given user and disable root ssh.
+ pytest.param(
+ ["key1"],
+ "clouduser",
+ True,
+ id="with_user_disable_root",
+ ),
+ # No user and disable root ssh.
+ pytest.param(
+ ["key1"],
+ None,
+ True,
+ id="with_no_user_disable_root",
+ ),
+ ],
+ )
+ def test_apply_credentials(
+ self, m_setup_keys, keys, user, disable_root_opts
+ ):
options = ssh_util.DISABLE_USER_OPTS
- cc_ssh.apply_credentials(keys, user, True, options)
- options = options.replace("$USER", "NONE")
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual(
- [mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list,
- )
+ cc_ssh.apply_credentials(keys, user, disable_root_opts, options)
+ if not disable_root_opts:
+ expected_options = ""
+ else:
+ expected_options = _replace_options(user)
+ expected_calls = [
+ mock.call(set(keys), "root", options=expected_options)
+ ]
+ if user:
+ expected_calls = [mock.call(set(keys), user)] + expected_calls
+ assert expected_calls == m_setup_keys.call_args_list
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@@ -102,24 +112,20 @@ class TestHandleSsh(CiTestCase):
m_path_exists.return_value = True
m_nug.return_value = ([], {})
cc_ssh.PUBLISH_HOST_KEYS = False
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
options = options.replace("$DISABLE_USER", "root")
m_glob.assert_called_once_with("/etc/ssh/ssh_host_*key*")
- self.assertIn(
- [
- mock.call("/etc/ssh/ssh_host_rsa_key"),
- mock.call("/etc/ssh/ssh_host_dsa_key"),
- mock.call("/etc/ssh/ssh_host_ecdsa_key"),
- mock.call("/etc/ssh/ssh_host_ed25519_key"),
- ],
- m_path_exists.call_args_list,
- )
- self.assertEqual(
- [mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list,
- )
+ assert [
+ mock.call("/etc/ssh/ssh_host_rsa_key"),
+ mock.call("/etc/ssh/ssh_host_dsa_key"),
+ mock.call("/etc/ssh/ssh_host_ecdsa_key"),
+ mock.call("/etc/ssh/ssh_host_ed25519_key"),
+ ] in m_path_exists.call_args_list
+ assert [
+ mock.call(set(keys), "root", options=options)
+ ] == m_setup_keys.call_args_list
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@@ -137,110 +143,115 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual(
- [
- mock.call(set(), user),
- mock.call(set(), "root", options=options),
- ],
- m_setup_keys.call_args_list,
- )
-
+ assert [
+ mock.call(set(), user),
+ mock.call(set(), "root", options=options),
+ ] == m_setup_keys.call_args_list
+
+ @pytest.mark.parametrize(
+ "cfg,mock_get_public_ssh_keys,empty_opts",
+ [
+ pytest.param({}, False, False, id="no_cfg"),
+ pytest.param(
+ {"disable_root": True},
+ False,
+ False,
+ id="explicit_disable_root",
+ ),
+ # When disable_root == False, the ssh redirect for root is skipped
+ pytest.param(
+ {"disable_root": False},
+ True,
+ True,
+ id="cfg_without_disable_root",
+ ),
+ ],
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_no_cfg_and_default_root(
- self, m_path_exists, m_nug, m_glob, m_setup_keys
+ def test_handle_default_root(
+ self,
+ m_path_exists,
+ m_nug,
+ m_glob,
+ m_setup_keys,
+ cfg,
+ mock_get_public_ssh_keys,
+ empty_opts,
):
- """Test handle with no config and a default distro user."""
- cfg = {}
+ """Test handle with a default distro user."""
keys = ["key1"]
user = "clouduser"
m_glob.return_value = [] # Return no matching keys to prevent removal
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ if mock_get_public_ssh_keys:
+ cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
cc_ssh.handle("name", cfg, cloud, LOG, None)
- options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual(
- [
- mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options),
- ],
- m_setup_keys.call_args_list,
- )
-
+ if empty_opts:
+ options = ""
+ else:
+ options = _replace_options(user)
+ assert [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ] == m_setup_keys.call_args_list
+
+ @pytest.mark.parametrize(
+ "cfg, expected_key_types",
+ [
+ pytest.param({}, KEY_NAMES_NO_DSA, id="default"),
+ pytest.param(
+ {"ssh_publish_hostkeys": {"enabled": True}},
+ KEY_NAMES_NO_DSA,
+ id="config_enable",
+ ),
+ pytest.param(
+ {"ssh_publish_hostkeys": {"enabled": False}},
+ None,
+ id="config_disable",
+ ),
+ pytest.param(
+ {
+ "ssh_publish_hostkeys": {
+ "enabled": True,
+ "blacklist": ["dsa", "rsa"],
+ }
+ },
+ ["ecdsa", "ed25519"],
+ id="config_blacklist",
+ ),
+ pytest.param(
+ {"ssh_publish_hostkeys": {"enabled": True, "blacklist": []}},
+ cc_ssh.GENERATE_KEY_NAMES,
+ id="empty_blacklist",
+ ),
+ ],
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_cfg_with_explicit_disable_root(
- self, m_path_exists, m_nug, m_glob, m_setup_keys
- ):
- """Test handle with explicit disable_root and a default distro user."""
- # This test is identical to test_handle_no_cfg_and_default_root,
- # except this uses an explicit cfg value
- cfg = {"disable_root": True}
- keys = ["key1"]
- user = "clouduser"
- m_glob.return_value = [] # Return no matching keys to prevent removal
- # Mock os.path.exits to True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
- cc_ssh.handle("name", cfg, cloud, LOG, None)
-
- options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
- options = options.replace("$DISABLE_USER", "root")
- self.assertEqual(
- [
- mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options),
- ],
- m_setup_keys.call_args_list,
- )
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_cfg_without_disable_root(
- self, m_path_exists, m_nug, m_glob, m_setup_keys
- ):
- """Test handle with disable_root == False."""
- # When disable_root == False, the ssh redirect for root is skipped
- cfg = {"disable_root": False}
- keys = ["key1"]
- user = "clouduser"
- m_glob.return_value = [] # Return no matching keys to prevent removal
- # Mock os.path.exits to True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
- cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
- cc_ssh.handle("name", cfg, cloud, LOG, None)
-
- self.assertEqual(
- [
- mock.call(set(keys), user),
- mock.call(set(keys), "root", options=""),
- ],
- m_setup_keys.call_args_list,
- )
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_default(
- self, m_path_exists, m_nug, m_glob, m_setup_keys
+ def test_handle_publish_hostkeys(
+ self,
+ m_path_exists,
+ m_nug,
+ m_glob,
+ m_setup_keys,
+ publish_hostkey_test_setup,
+ cfg,
+ expected_key_types,
):
"""Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
+ test_hostkeys, test_hostkey_files = publish_hostkey_test_setup
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
@@ -248,160 +259,28 @@ class TestHandleSsh(CiTestCase):
m_glob.side_effect = iter(
[
[],
- self.test_hostkey_files,
+ test_hostkey_files,
]
)
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud = get_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {}
- expected_call = [
- self.test_hostkeys[key_type] for key_type in KEY_NAMES_NO_DSA
- ]
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual(
- [mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list,
- )
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_config_enable(
- self, m_path_exists, m_nug, m_glob, m_setup_keys
- ):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = False
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter(
- [
- [],
- self.test_hostkey_files,
- ]
- )
- # Mock os.path.exits to True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {"ssh_publish_hostkeys": {"enabled": True}}
- expected_call = [
- self.test_hostkeys[key_type] for key_type in KEY_NAMES_NO_DSA
- ]
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual(
- [mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list,
- )
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_config_disable(
- self, m_path_exists, m_nug, m_glob, m_setup_keys
- ):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = True
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter(
- [
- [],
- self.test_hostkey_files,
- ]
- )
- # Mock os.path.exits to True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {"ssh_publish_hostkeys": {"enabled": False}}
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertFalse(cloud.datasource.publish_host_keys.call_args_list)
- cloud.datasource.publish_host_keys.assert_not_called()
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_config_blacklist(
- self, m_path_exists, m_nug, m_glob, m_setup_keys
- ):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = True
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter(
- [
- [],
- self.test_hostkey_files,
- ]
- )
- # Mock os.path.exits to True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {
- "ssh_publish_hostkeys": {
- "enabled": True,
- "blacklist": ["dsa", "rsa"],
- }
- }
- expected_call = [
- self.test_hostkeys[key_type] for key_type in ["ecdsa", "ed25519"]
- ]
- cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual(
- [mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list,
- )
-
- @mock.patch(MODPATH + "glob.glob")
- @mock.patch(MODPATH + "ug_util.normalize_users_groups")
- @mock.patch(MODPATH + "os.path.exists")
- def test_handle_publish_hostkeys_empty_blacklist(
- self, m_path_exists, m_nug, m_glob, m_setup_keys
- ):
- """Test handle with various configs for ssh_publish_hostkeys."""
- self._publish_hostkey_test_setup()
- cc_ssh.PUBLISH_HOST_KEYS = True
- keys = ["key1"]
- user = "clouduser"
- # Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter(
- [
- [],
- self.test_hostkey_files,
+ expected_calls = []
+ if expected_key_types is not None:
+ expected_calls = [
+ mock.call(
+ [
+ test_hostkeys[key_type]
+ for key_type in expected_key_types
+ ]
+ )
]
- )
- # Mock os.path.exits to True to short-circuit the key writing logic
- m_path_exists.return_value = True
- m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
- cloud.datasource.publish_host_keys = mock.Mock()
-
- cfg = {"ssh_publish_hostkeys": {"enabled": True, "blacklist": []}}
- expected_call = [
- self.test_hostkeys[key_type]
- for key_type in cc_ssh.GENERATE_KEY_NAMES
- ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual(
- [mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list,
+ assert (
+ expected_calls == cloud.datasource.publish_host_keys.call_args_list
)
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@@ -418,7 +297,7 @@ class TestHandleSsh(CiTestCase):
public_name = "{}_public".format(key_type)
cert_name = "{}_certificate".format(key_type)
- # Actual key contents don"t have to be realistic
+ # Actual key contents don't have to be realistic
private_value = "{}_PRIVATE_KEY".format(key_type)
public_value = "{}_PUBLIC_KEY".format(key_type)
cert_value = "{}_CERT_KEY".format(key_type)
@@ -458,10 +337,100 @@ class TestHandleSsh(CiTestCase):
with mock.patch(
MODPATH + "ssh_util.parse_ssh_config", return_value=[]
):
- cc_ssh.handle(
- "name", cfg, self.tmp_cloud(distro="ubuntu"), LOG, None
- )
+ cc_ssh.handle("name", cfg, get_cloud(distro="ubuntu"), LOG, None)
# Check that all expected output has been done.
for call_ in expected_calls:
- self.assertIn(call_, m_write_file.call_args_list)
+ assert call_ in m_write_file.call_args_list
+
+ @pytest.mark.parametrize(
+ "key_type,reason",
+ [
+ ("ecdsa-sk", "unsupported"),
+ ("ed25519-sk", "unsupported"),
+ ("public", "unrecognized"),
+ ],
+ )
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "util.write_file")
+ def test_handle_invalid_ssh_keys_are_skipped(
+ self,
+ m_write_file,
+ m_nug,
+ m_setup_keys,
+ key_type,
+ reason,
+ caplog,
+ ):
+ cfg = {
+ "ssh_keys": {
+ f"{key_type}_private": f"{key_type}_private",
+ f"{key_type}_public": f"{key_type}_public",
+ f"{key_type}_certificate": f"{key_type}_certificate",
+ },
+ "ssh_deletekeys": False,
+ "ssh_publish_hostkeys": {"enabled": False},
+ }
+ # Run the handler.
+ m_nug.return_value = ([], {})
+ with mock.patch(
+ MODPATH + "ssh_util.parse_ssh_config", return_value=[]
+ ):
+ cc_ssh.handle("name", cfg, get_cloud("ubuntu"), LOG, None)
+ assert [] == m_write_file.call_args_list
+ expected_log_msgs = [
+ f'Skipping {reason} ssh_keys entry: "{key_type}_private"',
+ f'Skipping {reason} ssh_keys entry: "{key_type}_public"',
+ f'Skipping {reason} ssh_keys entry: "{key_type}_certificate"',
+ ]
+ for expected_log_msg in expected_log_msgs:
+ assert caplog.text.count(expected_log_msg) == 1
+
+
+class TestSshSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ ({"ssh_authorized_keys": ["key1", "key2"]}, None),
+ (
+ {"ssh_keys": {"dsa_private": "key1", "rsa_public": "key2"}},
+ None,
+ ),
+ (
+ {"ssh_keys": {"rsa_a": "key"}},
+ "'rsa_a' does not match any of the regexes",
+ ),
+ (
+ {"ssh_keys": {"a_public": "key"}},
+ "'a_public' does not match any of the regexes",
+ ),
+ (
+ {"ssh_keys": {"ecdsa-sk_public": "key"}},
+ "'ecdsa-sk_public' does not match any of the regexes",
+ ),
+ (
+ {"ssh_keys": {"ed25519-sk_public": "key"}},
+ "'ed25519-sk_public' does not match any of the regexes",
+ ),
+ (
+ {"ssh_authorized_keys": "ssh-rsa blah"},
+ "'ssh-rsa blah' is not of type 'array'",
+ ),
+ ({"ssh_genkeytypes": ["bad"]}, "'bad' is not one of"),
+ (
+ {"disable_root_opts": ["no-port-forwarding"]},
+ r"\['no-port-forwarding'\] is not of type 'string'",
+ ),
+ (
+ {"ssh_publish_hostkeys": {"key": "value"}},
+ "Additional properties are not allowed",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
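test_cc_ssh.py's _publish_hostkey_test_setup method becomes the publish_hostkey_test_setup fixture above, which builds its scratch files under pytest's tmpdir and yields the data instead of stashing it on self. A reduced, runnable sketch of that shape (key names and contents here are illustrative):

import os

import pytest


@pytest.fixture
def hostkey_files(tmpdir):
    """Write stand-in public key files and hand their paths to the test."""
    paths = []
    for key_type in ("rsa", "ed25519"):  # illustrative subset of key types
        path = os.path.join(str(tmpdir), "ssh_host_%s_key.pub" % key_type)
        with open(path, "w") as f:
            f.write("ssh-%s AAAA" % key_type)
        paths.append(path)
    yield paths  # any teardown would go after the yield


def test_files_exist(hostkey_files):
    assert all(os.path.exists(p) for p in hostkey_files)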
diff --git a/tests/unittests/config/test_cc_ssh_import_id.py b/tests/unittests/config/test_cc_ssh_import_id.py
new file mode 100644
index 00000000..ffeee92e
--- /dev/null
+++ b/tests/unittests/config/test_cc_ssh_import_id.py
@@ -0,0 +1,78 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+from unittest import mock
+
+import pytest
+
+from cloudinit.config import cc_ssh_import_id
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+MODPATH = "cloudinit.config.cc_ssh_import_ids."
+
+
+class TestIsKeyInNestedDict:
+ @pytest.mark.parametrize(
+ "cfg,expected",
+ (
+ ({}, False),
+ ({"users": [{"name": "bob"}]}, False),
+ ({"ssh_import_id": ["yep"]}, True),
+ ({"ssh_import_id": ["yep"], "users": [{"name": "bob"}]}, True),
+ (
+ {
+ "apt": {"preserve_sources_list": True},
+ "ssh_import_id": ["yep"],
+ "users": [{"name": "bob"}],
+ },
+ True,
+ ),
+ (
+ {
+ "apt": [{}],
+ "ssh_import_id": ["yep"],
+ "users": [{"name": "bob"}],
+ },
+ True,
+ ),
+ (
+ {
+ "apt": {"preserve_sources_list": True},
+ "users": [
+ {"name": "bob"},
+ {"name": "judy", "ssh_import_id": ["yep"]},
+ ],
+ },
+ True,
+ ),
+ ),
+ )
+ def test_find_ssh_import_id_directives(self, cfg, expected):
+ assert expected is cc_ssh_import_id.is_key_in_nested_dict(
+ cfg, "ssh_import_id"
+ )
+
+
+class TestHandleSshImportIDs:
+ """Test cc_ssh_import_id handling of config."""
+
+ @pytest.mark.parametrize(
+ "cfg,log",
+ (
+ ({}, "no 'ssh_import_id' directives found"),
+ (
+ {"users": [{"name": "bob"}]},
+ "no 'ssh_import_id' directives found",
+ ),
+ ({"ssh_import_id": ["bobkey"]}, "ssh-import-id is not installed"),
+ ),
+ )
+ @mock.patch("cloudinit.subp.which")
+ def test_skip_inapplicable_configs(self, m_which, cfg, log, caplog):
+ """Skip config without ssh_import_id"""
+ m_which.return_value = None
+ cloud = get_cloud("ubuntu")
+ cc_ssh_import_id.handle("name", cfg, cloud, LOG, [])
+ assert log in caplog.text
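TestIsKeyInNestedDict pins down the behavior of cc_ssh_import_id.is_key_in_nested_dict. One plausible implementation consistent with those parametrized cases — the real one lives in cloudinit/config/cc_ssh_import_id.py and may differ in detail:

def is_key_in_nested_dict(config, search_key) -> bool:
    """Depth-first search for search_key anywhere in nested dicts/lists."""
    if isinstance(config, dict):
        if search_key in config:
            return True
        return any(
            is_key_in_nested_dict(value, search_key)
            for value in config.values()
        )
    if isinstance(config, list):
        return any(is_key_in_nested_dict(item, search_key) for item in config)
    return False


# Mirrors the parametrized cases above.
assert not is_key_in_nested_dict({"users": [{"name": "bob"}]}, "ssh_import_id")
assert is_key_in_nested_dict(
    {"users": [{"name": "judy", "ssh_import_id": ["yep"]}]}, "ssh_import_id"
)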
diff --git a/tests/unittests/config/test_cc_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py
index 2037c5ed..0c5544e1 100644
--- a/tests/unittests/config/test_cc_ubuntu_advantage.py
+++ b/tests/unittests/config/test_cc_ubuntu_advantage.py
@@ -1,19 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import re
+
+import pytest
from cloudinit import subp
from cloudinit.config.cc_ubuntu_advantage import (
configure_ua,
handle,
maybe_install_ua_tools,
- schema,
)
-from cloudinit.config.schema import validate_cloudconfig_schema
-from tests.unittests.helpers import (
- CiTestCase,
- SchemaTestCaseMixin,
- mock,
- skipUnlessJsonSchema,
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
# Module path used in mocks
MPATH = "cloudinit.config.cc_ubuntu_advantage"
@@ -172,64 +173,28 @@ class TestConfigureUA(CiTestCase):
)
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
-
- with_logs = True
- schema = schema
-
- @mock.patch("%s.maybe_install_ua_tools" % MPATH)
- @mock.patch("%s.configure_ua" % MPATH)
- def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
- """If ubuntu_advantage configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({"ubuntu_advantage": "wrong type"}, schema)
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
- " 'wrong type' is not of type 'object'\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("%s.maybe_install_ua_tools" % MPATH)
- @mock.patch("%s.configure_ua" % MPATH)
- def test_schema_disallows_unknown_keys(self, _cfg, _):
- """Unknown keys in ubuntu_advantage configuration emit warnings."""
- validate_cloudconfig_schema(
- {"ubuntu_advantage": {"token": "winner", "invalid-key": ""}},
- schema,
- )
- self.assertIn(
- "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
- " Additional properties are not allowed ('invalid-key' was"
- " unexpected)",
- self.logs.getvalue(),
- )
-
- @mock.patch("%s.maybe_install_ua_tools" % MPATH)
- @mock.patch("%s.configure_ua" % MPATH)
- def test_warn_schema_requires_token(self, _cfg, _):
- """Warn if ubuntu_advantage configuration lacks token."""
- validate_cloudconfig_schema(
- {"ubuntu_advantage": {"enable": ["esm"]}}, schema
- )
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
- " 'token' is a required property\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("%s.maybe_install_ua_tools" % MPATH)
- @mock.patch("%s.configure_ua" % MPATH)
- def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
- """Warn when ubuntu_advantage:enable config is not a list."""
- validate_cloudconfig_schema(
- {"ubuntu_advantage": {"enable": "needslist"}}, schema
- )
- self.assertEqual(
- "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
- " 'token' is a required property\nubuntu_advantage.enable:"
- " 'needslist' is not of type 'array'\n",
- self.logs.getvalue(),
- )
+class TestUbuntuAdvantageSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ ({"ubuntu_advantage": {}}, "'token' is a required property"),
+ # Strict keys
+ (
+ {"ubuntu_advantage": {"token": "win", "invalidkey": ""}},
+ re.escape(
+ "ubuntu_advantage: Additional properties are not allowed"
+ " ('invalidkey"
+ ),
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
class TestHandle(CiTestCase):
@@ -240,8 +205,8 @@ class TestHandle(CiTestCase):
super(TestHandle, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch("%s.validate_cloudconfig_schema" % MPATH)
- def test_handle_no_config(self, m_schema):
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ def test_handle_no_config(self, m_maybe_install_ua_tools):
"""When no ua-related configuration is provided, nothing happens."""
cfg = {}
handle("ua-test", cfg=cfg, cloud=None, log=self.logger, args=None)
@@ -250,7 +215,7 @@ class TestHandle(CiTestCase):
" configuration found",
self.logs.getvalue(),
)
- m_schema.assert_not_called()
+ self.assertEqual(m_maybe_install_ua_tools.call_count, 0)
@mock.patch("%s.configure_ua" % MPATH)
@mock.patch("%s.maybe_install_ua_tools" % MPATH)
diff --git a/tests/unittests/config/test_cc_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py
index 4987492d..3cbde8b2 100644
--- a/tests/unittests/config/test_cc_ubuntu_drivers.py
+++ b/tests/unittests/config/test_cc_ubuntu_drivers.py
@@ -2,10 +2,14 @@
import copy
import os
+import re
+
+import pytest
from cloudinit.config import cc_ubuntu_drivers as drivers
from cloudinit.config.schema import (
SchemaValidationError,
+ get_schema,
validate_cloudconfig_schema,
)
from cloudinit.subp import ProcessExecutionError
@@ -47,17 +51,6 @@ class TestUbuntuDrivers(CiTestCase):
with_logs = True
- @skipUnlessJsonSchema()
- def test_schema_requires_boolean_for_license_accepted(self):
- with self.assertRaisesRegex(
- SchemaValidationError, ".*license-accepted.*TRUE.*boolean"
- ):
- validate_cloudconfig_schema(
- {"drivers": {"nvidia": {"license-accepted": "TRUE"}}},
- schema=drivers.schema,
- strict=True,
- )
-
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=False)
@@ -290,4 +283,39 @@ class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
)
+class TestUbuntuDriversSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Strict boolean license-accepted
+ (
+ {"drivers": {"nvidia": {"license-accepted": "TRUE"}}},
+ "drivers.nvidia.license-accepted: 'TRUE' is not of type"
+ " 'boolean'",
+ ),
+ # Additional properties disallowed
+ (
+ {"drivers": {"bogus": {"license-accepted": True}}},
+ re.escape(
+ "drivers: Additional properties are not allowed ('bogus'"
+ ),
+ ),
+ (
+ {"drivers": {"nvidia": {"bogus": True}}},
+ re.escape(
+ "drivers.nvidia: Additional properties are not allowed"
+ " ('bogus' "
+ ),
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py
index 2bbc16f4..f7aafe46 100644
--- a/tests/unittests/config/test_cc_update_etc_hosts.py
+++ b/tests/unittests/config/test_cc_update_etc_hosts.py
@@ -2,10 +2,18 @@
import logging
import os
+import re
import shutil
+import pytest
+
from cloudinit import cloud, distros, helpers, util
from cloudinit.config import cc_update_etc_hosts
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests import helpers as t_help
LOG = logging.getLogger(__name__)
@@ -66,3 +74,25 @@ class TestHostsFile(t_help.FilesystemMockingTestCase):
self.assertIsNone("No entry for 127.0.1.1 in etc/hosts")
if "::1 cloud-init.test.us cloud-init" not in contents:
self.assertIsNone("No entry for 127.0.0.1 in etc/hosts")
+
+
+class TestUpdateEtcHosts:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ (
+ {"manage_etc_hosts": "templatey"},
+ re.escape(
+ "manage_etc_hosts: 'templatey' is not one of"
+ " [True, False, 'template', 'localhost']"
+ ),
+ ),
+ ],
+ )
+ @t_help.skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
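The "'templatey' is not one of [True, False, 'template', 'localhost']" message above comes from a JSON-schema enum constraint. A minimal sketch of the same check using the jsonschema library directly, with a simplified stand-in for cloud-init's real manage_etc_hosts schema:

import jsonschema
import pytest

# Simplified stand-in schema; the real one carries more properties.
SCHEMA = {
    "type": "object",
    "properties": {
        "manage_etc_hosts": {"enum": [True, False, "template", "localhost"]}
    },
}


def test_enum_rejects_unknown_value():
    jsonschema.validate({"manage_etc_hosts": "template"}, SCHEMA)  # passes
    with pytest.raises(jsonschema.ValidationError):
        jsonschema.validate({"manage_etc_hosts": "templatey"}, SCHEMA)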
diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py
index 0bd3c980..af8bdc30 100644
--- a/tests/unittests/config/test_cc_users_groups.py
+++ b/tests/unittests/config/test_cc_users_groups.py
@@ -1,8 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import re
+import pytest
from cloudinit.config import cc_users_groups
-from tests.unittests.helpers import CiTestCase, mock
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
MODPATH = "cloudinit.config.cc_users_groups"
@@ -185,6 +192,27 @@ class TestHandleUsersGroups(CiTestCase):
)
m_group.assert_not_called()
+ def test_users_without_home_cannot_import_ssh_keys(self, m_user, m_group):
+ cfg = {
+ "users": [
+ "default",
+ {
+ "name": "me2",
+ "ssh_import_id": ["snowflake"],
+ "no_create_home": True,
+ },
+ ]
+ }
+ cloud = self.tmp_cloud(distro="ubuntu", sys_cfg={}, metadata={})
+ with self.assertRaises(ValueError) as context_manager:
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ m_group.assert_not_called()
+ self.assertEqual(
+ "Not creating user me2. Key(s) ssh_import_id cannot be provided"
+ " with no_create_home",
+ str(context_manager.exception),
+ )
+
def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group):
"""Warn when ssh_redirect_user is not 'default'."""
cfg = {
@@ -266,3 +294,45 @@ class TestHandleUsersGroups(CiTestCase):
" cloud configuration users: [default, ..].\n",
self.logs.getvalue(),
)
+
+
+class TestUsersGroupsSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Validate default settings not covered by examples
+ ({"groups": ["anygrp"]}, None),
+ ({"groups": "anygrp,anyothergroup"}, None), # DEPRECATED
+ # Create anygrp with user1 as member
+ ({"groups": [{"anygrp": "user1"}]}, None),
+ # Create anygrp with user1 as member using object/string syntax
+ ({"groups": {"anygrp": "user1"}}, None),
+ # Create anygrp with user1 as member using object/list syntax
+ ({"groups": {"anygrp": ["user1"]}}, None),
+ ({"groups": [{"anygrp": ["user1", "user2"]}]}, None),
+ # Make default username "olddefault": DEPRECATED
+ ({"user": "olddefault"}, None),
+ # Create multiple users, and include default user. DEPRECATED
+ ({"users": "oldstyle,default"}, None),
+ ({"users": ["default"]}, None),
+ ({"users": ["default", ["aaa", "bbb"]]}, None),
+ ({"users": ["foobar"]}, None), # no default user creation
+ ({"users": [{"name": "bbsw"}]}, None),
+ ({"groups": [{"yep": ["user1"]}]}, None),
+ (
+ {"user": ["no_list_allowed"]},
+ re.escape("user: ['no_list_allowed'] is not valid "),
+ ),
+ (
+ {"groups": {"anygrp": 1}},
+ "groups.anygrp: 1 is not of type 'string', 'array'",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
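test_users_without_home_cannot_import_ssh_keys above uses assertRaises as a context manager so the exception message can be checked after the call. A standalone sketch of that pattern, with a toy handler standing in for cc_users_groups.handle:

import unittest


def handle(user_cfg):
    # Toy version of the conflict check exercised above.
    if user_cfg.get("no_create_home") and user_cfg.get("ssh_import_id"):
        raise ValueError(
            "Key(s) ssh_import_id cannot be provided with no_create_home"
        )


class ExampleTest(unittest.TestCase):
    def test_conflicting_keys_raise(self):
        with self.assertRaises(ValueError) as context_manager:
            handle({"no_create_home": True, "ssh_import_id": ["snowflake"]})
        self.assertIn("no_create_home", str(context_manager.exception))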
diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py
index faea5885..01c920e8 100644
--- a/tests/unittests/config/test_cc_write_files.py
+++ b/tests/unittests/config/test_cc_write_files.py
@@ -1,19 +1,25 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
-import copy
import gzip
import io
+import re
import shutil
import tempfile
+import pytest
+
from cloudinit import log as logging
from cloudinit import util
from cloudinit.config.cc_write_files import decode_perms, handle, write_files
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests.helpers import (
CiTestCase,
FilesystemMockingTestCase,
- mock,
skipUnlessJsonSchema,
)
@@ -55,74 +61,6 @@ VALID_SCHEMA = {
]
}
-INVALID_SCHEMA = { # Dropped required path key
- "write_files": [
- {
- "append": False,
- "content": "a",
- "encoding": "gzip",
- "owner": "jeff",
- "permissions": "0777",
- }
- ]
-}
-
-
-@skipUnlessJsonSchema()
-@mock.patch("cloudinit.config.cc_write_files.write_files")
-class TestWriteFilesSchema(CiTestCase):
-
- with_logs = True
-
- def test_schema_validation_warns_missing_path(self, m_write_files):
- """The only required file item property is 'path'."""
- cc = self.tmp_cloud("ubuntu")
- valid_config = {"write_files": [{"path": "/some/path"}]}
- handle("cc_write_file", valid_config, cc, LOG, [])
- self.assertNotIn(
- "Invalid cloud-config provided:", self.logs.getvalue()
- )
- handle("cc_write_file", INVALID_SCHEMA, cc, LOG, [])
- self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
- self.assertIn("'path' is a required property", self.logs.getvalue())
-
- def test_schema_validation_warns_non_string_type_for_files(
- self, m_write_files
- ):
- """Schema validation warns of non-string values for each file item."""
- cc = self.tmp_cloud("ubuntu")
- for key in VALID_SCHEMA["write_files"][0].keys():
- if key == "append":
- key_type = "boolean"
- else:
- key_type = "string"
- invalid_config = copy.deepcopy(VALID_SCHEMA)
- invalid_config["write_files"][0][key] = 1
- handle("cc_write_file", invalid_config, cc, LOG, [])
- self.assertIn(
- mock.call("cc_write_file", invalid_config["write_files"]),
- m_write_files.call_args_list,
- )
- self.assertIn(
- "write_files.0.%s: 1 is not of type '%s'" % (key, key_type),
- self.logs.getvalue(),
- )
- self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
-
- def test_schema_validation_warns_on_additional_undefined_propertes(
- self, m_write_files
- ):
- """Schema validation warns on additional undefined file properties."""
- cc = self.tmp_cloud("ubuntu")
- invalid_config = copy.deepcopy(VALID_SCHEMA)
- invalid_config["write_files"][0]["bogus"] = "value"
- handle("cc_write_file", invalid_config, cc, LOG, [])
- self.assertIn(
- "Invalid cloud-config provided:\nwrite_files.0: Additional"
- " properties are not allowed ('bogus' was unexpected)",
- self.logs.getvalue(),
- )
-
class TestWriteFiles(FilesystemMockingTestCase):
@@ -133,19 +71,6 @@ class TestWriteFiles(FilesystemMockingTestCase):
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_type(self):
- """Schema validation warns of non-array value."""
- invalid_config = {"write_files": 1}
- cc = self.tmp_cloud("ubuntu")
- with self.assertRaises(TypeError):
- handle("cc_write_file", invalid_config, cc, LOG, [])
- self.assertIn(
- "Invalid cloud-config provided:\nwrite_files: 1 is not of type"
- " 'array'",
- self.logs.getvalue(),
- )
-
def test_simple(self):
self.patchUtils(self.tmp)
expected = "hello world\n"
@@ -214,6 +139,27 @@ class TestWriteFiles(FilesystemMockingTestCase):
)
self.assertEqual(len(expected), flen_expected)
+ def test_handle_plain_text(self):
+ self.patchUtils(self.tmp)
+ file_path = "/tmp/file-text-plain"
+ content = "asdf"
+ cfg = {
+ "write_files": [
+ {
+ "content": content,
+ "path": file_path,
+ "encoding": "text/plain",
+ "defer": False,
+ }
+ ]
+ }
+ cc = self.tmp_cloud("ubuntu")
+ handle("ignored", cfg, cc, LOG, [])
+ assert content == util.load_file(file_path)
+ self.assertNotIn(
+ "Unknown encoding type text/plain", self.logs.getvalue()
+ )
+
def test_deferred(self):
self.patchUtils(self.tmp)
file_path = "/tmp/deferred.file"
@@ -264,4 +210,54 @@ def _gzip_bytes(data):
fp.close()
+class TestWriteFilesSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Top-level write_files type validation
+ ({"write_files": 1}, "write_files: 1 is not of type 'array'"),
+ ({"write_files": []}, re.escape("write_files: [] is too short")),
+ (
+ {"write_files": [{}]},
+ "write_files.0: 'path' is a required property",
+ ),
+ (
+ {"write_files": [{"path": "/some", "bogus": True}]},
+ re.escape(
+ "write_files.0: Additional properties are not allowed"
+ " ('bogus'"
+ ),
+ ),
+ ( # Strict encoding choices
+ {"write_files": [{"path": "/some", "encoding": "g"}]},
+ re.escape(
+ "write_files.0.encoding: 'g' is not one of ['gz', 'gzip',"
+ ),
+ ),
+ (
+ {
+ "write_files": [
+ {
+ "append": False,
+ "content": "a",
+ "encoding": "text/plain",
+ "owner": "jeff",
+ "path": "/some",
+ "permissions": "0777",
+ }
+ ]
+ },
+ None,
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is not None:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
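
Several cases above wrap the expected message in re.escape() because pytest.raises(match=...) treats its argument as a regular expression, re.search()ed against the string form of the exception. A literal message such as "write_files: [] is too short" is itself an invalid pattern, since '[' opens a character class that never closes. A small sketch of why the escape is needed:

    import re

    import pytest

    def boom():
        raise ValueError("write_files: [] is too short")

    def test_escaped_match():
        # Without re.escape, the bare "[" would be parsed as regex syntax
        # and raise re.error("unterminated character set").
        with pytest.raises(
            ValueError, match=re.escape("write_files: [] is too short")
        ):
            boom()
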
diff --git a/tests/unittests/config/test_cc_write_files_deferred.py b/tests/unittests/config/test_cc_write_files_deferred.py
index 17203233..ed2056bb 100644
--- a/tests/unittests/config/test_cc_write_files_deferred.py
+++ b/tests/unittests/config/test_cc_write_files_deferred.py
@@ -3,56 +3,24 @@
import shutil
import tempfile
+import pytest
+
from cloudinit import log as logging
from cloudinit import util
from cloudinit.config.cc_write_files_deferred import handle
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests.helpers import (
- CiTestCase,
FilesystemMockingTestCase,
- mock,
skipUnlessJsonSchema,
)
-from .test_cc_write_files import VALID_SCHEMA
-
LOG = logging.getLogger(__name__)
-@skipUnlessJsonSchema()
-@mock.patch("cloudinit.config.cc_write_files_deferred.write_files")
-class TestWriteFilesDeferredSchema(CiTestCase):
-
- with_logs = True
-
- def test_schema_validation_warns_invalid_value(
- self, m_write_files_deferred
- ):
- """If 'defer' is defined, it must be of type 'bool'."""
-
- valid_config = {
- "write_files": [
- {**VALID_SCHEMA.get("write_files")[0], "defer": True}
- ]
- }
-
- invalid_config = {
- "write_files": [
- {**VALID_SCHEMA.get("write_files")[0], "defer": str("no")}
- ]
- }
-
- cc = self.tmp_cloud("ubuntu")
- handle("cc_write_files_deferred", valid_config, cc, LOG, [])
- self.assertNotIn(
- "Invalid cloud-config provided:", self.logs.getvalue()
- )
- handle("cc_write_files_deferred", invalid_config, cc, LOG, [])
- self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
- self.assertIn(
- "defer: 'no' is not of type 'boolean'", self.logs.getvalue()
- )
-
-
class TestWriteFilesDeferred(FilesystemMockingTestCase):
with_logs = True
@@ -82,4 +50,21 @@ class TestWriteFilesDeferred(FilesystemMockingTestCase):
util.load_file("/tmp/not_deferred.file")
+class TestWriteFilesDeferredSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # If 'defer' is present, it must be a boolean
+ (
+ {"write_files": [{"defer": "no"}]},
+ "write_files.0.defer: 'no' is not of type 'boolean'",
+ ),
+ ],
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py
index 550b0af2..d6de2ec2 100644
--- a/tests/unittests/config/test_cc_yum_add_repo.py
+++ b/tests/unittests/config/test_cc_yum_add_repo.py
@@ -2,11 +2,19 @@
import configparser
import logging
+import re
import shutil
import tempfile
+import pytest
+
from cloudinit import util
from cloudinit.config import cc_yum_add_repo
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from tests.unittests import helpers
LOG = logging.getLogger(__name__)
@@ -117,4 +125,40 @@ class TestConfig(helpers.FilesystemMockingTestCase):
self.assertEqual(parser.get(section, k), v)
+class TestAddYumRepoSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # Happy path case
+ ({"yum_repos": {"My-Repo 123": {"baseurl": "http://doit"}}}, None),
+ # yum_repo_dir is a string
+ (
+ {"yum_repo_dir": True},
+ "yum_repo_dir: True is not of type 'string'",
+ ),
+ (
+ {"yum_repos": {}},
+ re.escape("yum_repos: {} does not have enough properties"),
+ ),
+ # baseurl required
+ (
+ {"yum_repos": {"My-Repo": {}}},
+ "yum_repos.My-Repo: 'baseurl' is a required",
+ ),
+ # patternProperties don't override the type of explicitly named properties
+ (
+ {"yum_repos": {"My Repo": {"enabled": "nope"}}},
+ "yum_repos.My Repo.enabled: 'nope' is not of type 'boolean'",
+ ),
+ ],
+ )
+ @helpers.skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+
# vi: ts=4 expandtab
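
The last case above relies on a JSON Schema rule worth spelling out: when a key matches both an entry in properties and a patternProperties pattern, its value must satisfy both subschemas, so the pattern's looser type never overrides the explicit one. A minimal demonstration with plain jsonschema (a hypothetical schema, not the cc_yum_add_repo definition):

    import jsonschema

    SCHEMA = {
        "type": "object",
        "properties": {"enabled": {"type": "boolean"}},
        "patternProperties": {"^.+$": {"type": "string"}},
    }

    jsonschema.validate({"name": "My Repo"}, SCHEMA)  # passes: any string key/value

    try:
        # "enabled" matches "^.+$" too, but must also satisfy the explicit
        # boolean schema, so a string value is rejected.
        jsonschema.validate({"enabled": "nope"}, SCHEMA)
    except jsonschema.ValidationError as err:
        print(err.message)  # 'nope' is not of type 'boolean'
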
diff --git a/tests/unittests/config/test_salt_minion.py b/tests/unittests/config/test_salt_minion.py
new file mode 100644
index 00000000..b16034b4
--- /dev/null
+++ b/tests/unittests/config/test_salt_minion.py
@@ -0,0 +1,33 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+@skipUnlessJsonSchema()
+class TestSaltMinionSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ ({"salt_minion": {"conf": {"any": "thing"}}}, None),
+ ({"salt_minion": {"grains": {"any": "thing"}}}, None),
+ (
+ {"salt_minion": {"invalid": "key"}},
+ "Additional properties are not allowed",
+ ),
+ ({"salt_minion": {"conf": "a"}}, "'a' is not of type 'object'"),
+ ({"salt_minion": {"grains": "a"}}, "'a' is not of type 'object'"),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
index 1d48056a..c75b7227 100644
--- a/tests/unittests/config/test_schema.py
+++ b/tests/unittests/config/test_schema.py
@@ -4,32 +4,39 @@
import importlib
import inspect
import itertools
+import json
import logging
+import os
import sys
-from copy import copy
+from copy import copy, deepcopy
from pathlib import Path
from textwrap import dedent
+from types import ModuleType
+from typing import List
+import jsonschema
import pytest
-import yaml
-from yaml import safe_load
from cloudinit.config.schema import (
CLOUD_CONFIG_HEADER,
+ VERSIONED_USERDATA_SCHEMA_FILE,
MetaSchema,
SchemaValidationError,
- _schemapath_for_cloudconfig,
annotated_cloudconfig_file,
get_jsonschema_validator,
get_meta_doc,
get_schema,
+ get_schema_dir,
load_doc,
main,
validate_cloudconfig_file,
validate_cloudconfig_metaschema,
validate_cloudconfig_schema,
)
-from cloudinit.util import write_file
+from cloudinit.distros import OSFAMILIES
+from cloudinit.safeyaml import load, load_with_marks
+from cloudinit.settings import FREQUENCIES
+from cloudinit.util import load_file, write_file
from tests.unittests.helpers import (
CiTestCase,
cloud_init_project_dir,
@@ -54,22 +61,29 @@ def get_metas() -> dict:
return get_module_variable("meta")
-def get_module_variable(var_name) -> dict:
- """Inspect modules and get variable from module matching var_name"""
- schemas = {}
-
+def get_module_names() -> List[str]:
+ """Return list of module names in cloudinit/config"""
files = list(
Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py")
)
- modules = [mod.stem for mod in files]
+ return [mod.stem for mod in files]
- for module in modules:
- importlib.import_module("cloudinit.config.{}".format(module))
+def get_modules() -> List[ModuleType]:
+ """Return list of modules in cloudinit/config"""
+ return [
+ importlib.import_module(f"cloudinit.config.{module}")
+ for module in get_module_names()
+ ]
+
+
+def get_module_variable(var_name) -> dict:
+ """Inspect modules and get variable from module matching var_name"""
+ schemas = {}
+ get_modules()
for k, v in sys.modules.items():
path = Path(k)
-
if "cloudinit.config" == path.stem and path.suffix[1:4] == "cc_":
module_name = path.suffix[1:]
members = inspect.getmembers(v)
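
The sys.modules scan in get_module_variable() leans on a pathlib quirk: a dotted module name parses like a filename, so the final dotted component lands in .suffix and everything before it in .stem. A quick illustration:

    from pathlib import Path

    p = Path("cloudinit.config.cc_ntp")
    print(p.stem)        # cloudinit.config
    print(p.suffix)      # .cc_ntp
    print(p.suffix[1:])  # cc_ntp  -> the module name used as the dict key
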
@@ -81,35 +95,77 @@ def get_module_variable(var_name) -> dict:
return schemas
+class TestVersionedSchemas:
+ def _relative_ref_to_local_file_path(self, source_schema):
+ """Replace known relative ref URLs with full file path."""
+ # jsonschema 2.6.0 doesn't support relative URLs in $refs (bionic)
+ full_path_schema = deepcopy(source_schema)
+ relative_ref = full_path_schema["oneOf"][0]["allOf"][1]["$ref"]
+ full_local_filepath = get_schema_dir() + relative_ref[1:]
+ file_ref = f"file://{full_local_filepath}"
+ full_path_schema["oneOf"][0]["allOf"][1]["$ref"] = file_ref
+ return full_path_schema
+
+ @pytest.mark.parametrize(
+ "schema,error_msg",
+ (
+ ({}, None),
+ ({"version": "v1"}, None),
+ ({"version": "v2"}, "is not valid"),
+ ({"version": "v1", "final_message": -1}, "is not valid"),
+ ({"version": "v1", "final_message": "some msg"}, None),
+ ),
+ )
+ def test_versioned_cloud_config_schema_is_valid_json(
+ self, schema, error_msg
+ ):
+ version_schemafile = os.path.join(
+ get_schema_dir(), VERSIONED_USERDATA_SCHEMA_FILE
+ )
+ version_schema = json.loads(load_file(version_schemafile))
+ # Anchor $id locally to avoid the JSON resolver trying to pull the
+ # reference from our upstream raw file on GitHub.
+ version_schema["$id"] = f"file://{version_schemafile}"
+ if error_msg:
+ with pytest.raises(SchemaValidationError) as context_mgr:
+ try:
+ validate_cloudconfig_schema(
+ schema, schema=version_schema, strict=True
+ )
+ except jsonschema.exceptions.RefResolutionError:
+ full_path_schema = self._relative_ref_to_local_file_path(
+ version_schema
+ )
+ validate_cloudconfig_schema(
+ schema, schema=full_path_schema, strict=True
+ )
+ assert error_msg in str(context_mgr.value)
+ else:
+ try:
+ validate_cloudconfig_schema(
+ schema, schema=version_schema, strict=True
+ )
+ except jsonschema.exceptions.RefResolutionError:
+ full_path_schema = self._relative_ref_to_local_file_path(
+ version_schema
+ )
+ validate_cloudconfig_schema(
+ schema, schema=full_path_schema, strict=True
+ )
+
+
class TestGetSchema:
+ def test_static_schema_file_is_valid(self, caplog):
+ with caplog.at_level(logging.WARNING):
+ get_schema()
+ # Assert no warnings parsing our packaged schema file
+ warnings = [msg for (_, _, msg) in caplog.record_tuples]
+ assert [] == warnings
+
def test_get_schema_coalesces_known_schema(self):
"""Every cloudconfig module with schema is listed in allOf keyword."""
schema = get_schema()
- assert sorted(
- [
- "cc_apk_configure",
- "cc_apt_configure",
- "cc_apt_pipelining",
- "cc_bootcmd",
- "cc_byobu",
- "cc_ca_certs",
- "cc_chef",
- "cc_debug",
- "cc_disable_ec2_metadata",
- "cc_disk_setup",
- "cc_install_hotplug",
- "cc_keyboard",
- "cc_locale",
- "cc_ntp",
- "cc_resizefs",
- "cc_runcmd",
- "cc_snap",
- "cc_ubuntu_advantage",
- "cc_ubuntu_drivers",
- "cc_write_files",
- "cc_zypper_add_repo",
- ]
- ) == sorted(
+ assert sorted(get_module_names()) == sorted(
[meta["id"] for meta in get_metas().values() if meta is not None]
)
assert "http://json-schema.org/draft-04/schema#" == schema["$schema"]
@@ -126,6 +182,48 @@ class TestGetSchema:
{"$ref": "#/$defs/cc_debug"},
{"$ref": "#/$defs/cc_disable_ec2_metadata"},
{"$ref": "#/$defs/cc_disk_setup"},
+ {"$ref": "#/$defs/cc_fan"},
+ {"$ref": "#/$defs/cc_final_message"},
+ {"$ref": "#/$defs/cc_growpart"},
+ {"$ref": "#/$defs/cc_grub_dpkg"},
+ {"$ref": "#/$defs/cc_install_hotplug"},
+ {"$ref": "#/$defs/cc_keyboard"},
+ {"$ref": "#/$defs/cc_keys_to_console"},
+ {"$ref": "#/$defs/cc_landscape"},
+ {"$ref": "#/$defs/cc_locale"},
+ {"$ref": "#/$defs/cc_lxd"},
+ {"$ref": "#/$defs/cc_mcollective"},
+ {"$ref": "#/$defs/cc_migrator"},
+ {"$ref": "#/$defs/cc_mounts"},
+ {"$ref": "#/$defs/cc_ntp"},
+ {"$ref": "#/$defs/cc_package_update_upgrade_install"},
+ {"$ref": "#/$defs/cc_phone_home"},
+ {"$ref": "#/$defs/cc_power_state_change"},
+ {"$ref": "#/$defs/cc_puppet"},
+ {"$ref": "#/$defs/cc_resizefs"},
+ {"$ref": "#/$defs/cc_resolv_conf"},
+ {"$ref": "#/$defs/cc_rh_subscription"},
+ {"$ref": "#/$defs/cc_rsyslog"},
+ {"$ref": "#/$defs/cc_runcmd"},
+ {"$ref": "#/$defs/cc_salt_minion"},
+ {"$ref": "#/$defs/cc_scripts_vendor"},
+ {"$ref": "#/$defs/cc_seed_random"},
+ {"$ref": "#/$defs/cc_set_hostname"},
+ {"$ref": "#/$defs/cc_set_passwords"},
+ {"$ref": "#/$defs/cc_snap"},
+ {"$ref": "#/$defs/cc_spacewalk"},
+ {"$ref": "#/$defs/cc_ssh_authkey_fingerprints"},
+ {"$ref": "#/$defs/cc_ssh_import_id"},
+ {"$ref": "#/$defs/cc_ssh"},
+ {"$ref": "#/$defs/cc_timezone"},
+ {"$ref": "#/$defs/cc_ubuntu_advantage"},
+ {"$ref": "#/$defs/cc_ubuntu_drivers"},
+ {"$ref": "#/$defs/cc_update_etc_hosts"},
+ {"$ref": "#/$defs/cc_update_hostname"},
+ {"$ref": "#/$defs/cc_users_groups"},
+ {"$ref": "#/$defs/cc_write_files"},
+ {"$ref": "#/$defs/cc_yum_add_repo"},
+ {"$ref": "#/$defs/cc_zypper_add_repo"},
]
found_subschema_defs = []
legacy_schema_keys = []
@@ -136,67 +234,25 @@ class TestGetSchema:
legacy_schema_keys.extend(subschema["properties"].keys())
assert expected_subschema_defs == found_subschema_defs
- # This list will dwindle as we move legacy schema to new $defs
- assert [
- "drivers",
- "keyboard",
- "locale",
- "locale_configfile",
- "ntp",
- "resize_rootfs",
- "runcmd",
- "snap",
- "ubuntu_advantage",
- "updates",
- "write_files",
- "write_files",
- "zypper",
- ] == sorted(legacy_schema_keys)
+ # This list should remain empty unless we introduce new modules with
+ # legacy schema attributes defined within the cc_module.
+ assert [] == sorted(legacy_schema_keys)
class TestLoadDoc:
docs = get_module_variable("__doc__")
- # TODO( Drop legacy test when all sub-schemas in cloud-init-schema.json )
@pytest.mark.parametrize(
"module_name",
- (
- "cc_apt_pipelining", # new style composite schema file
- "cc_zypper_add_repo", # legacy sub-schema defined in module
- ),
+ ("cc_apt_pipelining",), # new style composite schema file
)
- def test_report_docs_for_legacy_and_consolidated_schema(self, module_name):
+ def test_report_docs_consolidated_schema(self, module_name):
doc = load_doc([module_name])
assert doc, "Unexpected empty docs for {}".format(module_name)
assert self.docs[module_name] == doc
-class Test_SchemapathForCloudconfig:
- """Coverage tests for supported YAML formats."""
-
- @pytest.mark.parametrize(
- "source_content, expected",
- (
- (b"{}", {}), # assert empty config handled
- # Multiple keys account for comments and whitespace lines
- (b"#\na: va\n \nb: vb\n#\nc: vc", {"a": 2, "b": 4, "c": 6}),
- # List items represented on correct line number
- (b"a:\n - a1\n\n - a2\n", {"a": 1, "a.0": 2, "a.1": 4}),
- # Nested dicts represented on correct line number
- (b"a:\n a1:\n\n aa1: aa1v\n", {"a": 1, "a.a1": 2, "a.a1.aa1": 4}),
- ),
- )
- def test_schemapaths_representatative_of_source_yaml(
- self, source_content, expected
- ):
- """Validate schemapaths dict accurately represents source YAML line."""
- cfg = yaml.safe_load(source_content)
- assert expected == _schemapath_for_cloudconfig(
- config=cfg, original_content=source_content
- )
-
-
class SchemaValidationErrorTest(CiTestCase):
"""Test validate_cloudconfig_schema"""
@@ -332,106 +388,133 @@ class TestCloudConfigExamples:
according to the unified schema of all config modules
"""
schema = get_schema()
- config_load = safe_load(example)
+ config_load = load(example)
+ # cloud-init-schema-v1 is permissive of additionalProperties at the
+ # top-level.
+ # To validate specific schemas against known documented examples
+ # we need to only define the specific module schema and supply
+ # strict=True.
+ # TODO: drop this pop/update once the full schema is strict
+ schema.pop("allOf")
+ schema.update(schema["$defs"][schema_id])
+ schema["additionalProperties"] = False
+ # Some module examples reference keys defined in multiple schemas
+ supplemental_schemas = {
+ "cc_ubuntu_advantage": ["cc_power_state_change"],
+ "cc_update_hostname": ["cc_set_hostname"],
+ "cc_users_groups": ["cc_ssh_import_id"],
+ "cc_disk_setup": ["cc_mounts"],
+ }
+ for supplement_id in supplemental_schemas.get(schema_id, []):
+ supplemental_props = dict(
+ [
+ (key, value)
+ for key, value in schema["$defs"][supplement_id][
+ "properties"
+ ].items()
+ ]
+ )
+ schema["properties"].update(supplemental_props)
validate_cloudconfig_schema(config_load, schema, strict=True)
-class ValidateCloudConfigFileTest(CiTestCase):
+class TestValidateCloudConfigFile:
"""Tests for validate_cloudconfig_file."""
- def setUp(self):
- super(ValidateCloudConfigFileTest, self).setUp()
- self.config_file = self.tmp_path("cloudcfg.yaml")
-
- def test_validateconfig_file_error_on_absent_file(self):
+ @pytest.mark.parametrize("annotate", (True, False))
+ def test_validateconfig_file_error_on_absent_file(self, annotate):
"""On absent config_path, validate_cloudconfig_file errors."""
- with self.assertRaises(RuntimeError) as context_mgr:
- validate_cloudconfig_file("/not/here", {})
- self.assertEqual(
- "Configfile /not/here does not exist", str(context_mgr.exception)
- )
+ with pytest.raises(
+ RuntimeError, match="Configfile /not/here does not exist"
+ ):
+ validate_cloudconfig_file("/not/here", {}, annotate)
- def test_validateconfig_file_error_on_invalid_header(self):
+ @pytest.mark.parametrize("annotate", (True, False))
+ def test_validateconfig_file_error_on_invalid_header(
+ self, annotate, tmpdir
+ ):
"""On invalid header, validate_cloudconfig_file errors.
A SchemaValidationError is raised when the file doesn't begin with
CLOUD_CONFIG_HEADER.
"""
- write_file(self.config_file, "#junk")
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertEqual(
- "Cloud config schema errors: format-l1.c1: File {0} needs to begin"
- ' with "{1}"'.format(
- self.config_file, CLOUD_CONFIG_HEADER.decode()
- ),
- str(context_mgr.exception),
+ config_file = tmpdir.join("my.yaml")
+ config_file.write("#junk")
+ error_msg = (
+ "Cloud config schema errors: format-l1.c1: File"
+ f" {config_file} needs to begin with"
+ f' "{CLOUD_CONFIG_HEADER.decode()}"'
)
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_file(config_file.strpath, {}, annotate)
- def test_validateconfig_file_error_on_non_yaml_scanner_error(self):
+ @pytest.mark.parametrize("annotate", (True, False))
+ def test_validateconfig_file_error_on_non_yaml_scanner_error(
+ self, annotate, tmpdir
+ ):
"""On non-yaml scan issues, validate_cloudconfig_file errors."""
# Generate a scanner error by providing text on a single line with
# improper indent.
- write_file(self.config_file, "#cloud-config\nasdf:\nasdf")
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertIn(
- "schema errors: format-l3.c1: File {0} is not valid yaml.".format(
- self.config_file
- ),
- str(context_mgr.exception),
+ config_file = tmpdir.join("my.yaml")
+ config_file.write("#cloud-config\nasdf:\nasdf")
+ error_msg = (
+ f".*errors: format-l3.c1: File {config_file} is not valid yaml.*"
)
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_file(config_file.strpath, {}, annotate)
- def test_validateconfig_file_error_on_non_yaml_parser_error(self):
+ @pytest.mark.parametrize("annotate", (True, False))
+ def test_validateconfig_file_error_on_non_yaml_parser_error(
+ self, annotate, tmpdir
+ ):
"""On non-yaml parser issues, validate_cloudconfig_file errors."""
- write_file(self.config_file, "#cloud-config\n{}}")
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertIn(
- "schema errors: format-l2.c3: File {0} is not valid yaml.".format(
- self.config_file
- ),
- str(context_mgr.exception),
+ config_file = tmpdir.join("my.yaml")
+ config_file.write("#cloud-config\n{}}")
+ error_msg = (
+ f"errors: format-l2.c3: File {config_file} is not valid yaml."
)
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_file(config_file.strpath, {}, annotate)
@skipUnlessJsonSchema()
- def test_validateconfig_file_sctrictly_validates_schema(self):
+ @pytest.mark.parametrize("annotate", (True, False))
+ def test_validateconfig_file_strictly_validates_schema(
+ self, annotate, tmpdir
+ ):
"""validate_cloudconfig_file raises errors on invalid schema."""
+ config_file = tmpdir.join("my.yaml")
schema = {"properties": {"p1": {"type": "string", "format": "string"}}}
- write_file(self.config_file, "#cloud-config\np1: -1")
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, schema)
- self.assertEqual(
- "Cloud config schema errors: p1: -1 is not of type 'string'",
- str(context_mgr.exception),
+ config_file.write("#cloud-config\np1: -1")
+ error_msg = (
+ "Cloud config schema errors: p1: -1 is not of type 'string'"
)
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_file(config_file.strpath, schema, annotate)
-class GetSchemaDocTest(CiTestCase):
+class TestSchemaDocMarkdown:
"""Tests for get_meta_doc."""
- def setUp(self):
- super(GetSchemaDocTest, self).setUp()
- self.required_schema = {
- "title": "title",
- "description": "description",
- "id": "id",
- "name": "name",
- "frequency": "frequency",
- "distros": ["debian", "rhel"],
- }
- self.meta: MetaSchema = {
- "title": "title",
- "description": "description",
- "id": "id",
- "name": "name",
- "frequency": "frequency",
- "distros": ["debian", "rhel"],
- "examples": [
- 'ex1:\n [don\'t, expand, "this"]',
- "ex2: true",
- ],
- }
+ required_schema = {
+ "title": "title",
+ "description": "description",
+ "id": "id",
+ "name": "name",
+ "frequency": "frequency",
+ "distros": ["debian", "rhel"],
+ }
+ meta: MetaSchema = {
+ "title": "title",
+ "description": "description",
+ "id": "id",
+ "name": "name",
+ "frequency": "frequency",
+ "distros": ["debian", "rhel"],
+ "examples": [
+ 'ex1:\n [don\'t, expand, "this"]',
+ "ex2: true",
+ ],
+ }
def test_get_meta_doc_returns_restructured_text(self):
"""get_meta_doc returns restructured text for a cloudinit schema."""
@@ -449,48 +532,138 @@ class GetSchemaDocTest(CiTestCase):
)
doc = get_meta_doc(self.meta, full_schema)
- self.assertEqual(
+ assert (
dedent(
"""
- name
- ----
- **Summary:** title
+ name
+ ----
+ **Summary:** title
- description
+ description
- **Internal name:** ``id``
+ **Internal name:** ``id``
- **Module frequency:** frequency
+ **Module frequency:** frequency
- **Supported distros:** debian, rhel
+ **Supported distros:** debian, rhel
- **Config schema**:
- **prop1:** (array of integer) prop-description
+ **Config schema**:
+ **prop1:** (array of integer) prop-description
- **Examples**::
+ **Examples**::
- ex1:
- [don't, expand, "this"]
- # --- Example2 ---
- ex2: true
- """
- ),
- doc,
+ ex1:
+ [don't, expand, "this"]
+ # --- Example2 ---
+ ex2: true
+ """
+ )
+ == doc
)
def test_get_meta_doc_handles_multiple_types(self):
"""get_meta_doc delimits multiple property types with a '/'."""
schema = {"properties": {"prop1": {"type": ["string", "integer"]}}}
- self.assertIn(
- "**prop1:** (string/integer)", get_meta_doc(self.meta, schema)
+ assert "**prop1:** (string/integer)" in get_meta_doc(self.meta, schema)
+
+ def test_references_are_flattened_in_schema_docs(self):
+ """get_meta_doc flattens and renders full schema definitions."""
+ schema = {
+ "$defs": {
+ "flattenit": {
+ "type": ["object", "string"],
+ "description": "Objects support the following keys:",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<opaque_label>",
+ "description": "List of cool strings",
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
+ }
+ },
+ }
+ },
+ "properties": {"prop1": {"$ref": "#/$defs/flattenit"}},
+ }
+ assert (
+ dedent(
+ """\
+ **prop1:** (string/object) Objects support the following keys:
+
+ **<opaque_label>:** (array of string) List of cool strings
+ """
+ )
+ in get_meta_doc(self.meta, schema)
)
- def test_get_meta_doc_handles_enum_types(self):
+ @pytest.mark.parametrize(
+ "sub_schema,expected",
+ (
+ (
+ {"enum": [True, False, "stuff"]},
+ "**prop1:** (``true``/``false``/``stuff``)",
+ ),
+ # When type: string and enum, document enum values
+ (
+ {"type": "string", "enum": ["a", "b"]},
+ "**prop1:** (``a``/``b``)",
+ ),
+ ),
+ )
+ def test_get_meta_doc_handles_enum_types(self, sub_schema, expected):
"""get_meta_doc converts enum types to yaml and delimits with '/'."""
- schema = {"properties": {"prop1": {"enum": [True, False, "stuff"]}}}
- self.assertIn(
- "**prop1:** (true/false/stuff)", get_meta_doc(self.meta, schema)
- )
+ schema = {"properties": {"prop1": sub_schema}}
+ assert expected in get_meta_doc(self.meta, schema)
+
+ @pytest.mark.parametrize(
+ "schema,expected",
+ (
+ ( # Hide top-level keys like 'properties'
+ {
+ "hidden": ["properties"],
+ "properties": {
+ "p1": {"type": "string"},
+ "p2": {"type": "boolean"},
+ },
+ "patternProperties": {
+ "^.*$": {
+ "type": "string",
+ "label": "label2",
+ }
+ },
+ },
+ dedent(
+ """
+ **Config schema**:
+ **label2:** (string)
+ """
+ ),
+ ),
+ ( # Hide nested individual keys with a bool
+ {
+ "properties": {
+ "p1": {"type": "string", "hidden": True},
+ "p2": {"type": "boolean"},
+ }
+ },
+ dedent(
+ """
+ **Config schema**:
+ **p2:** (boolean)
+ """
+ ),
+ ),
+ ),
+ )
+ def test_get_meta_doc_hidden_hides_specific_properties_from_docs(
+ self, schema, expected
+ ):
+ """Docs are hidden for any property in the hidden list.
+
+ Useful for hiding deprecated key schema.
+ """
+ assert expected in get_meta_doc(self.meta, schema)
def test_get_meta_doc_handles_nested_oneof_property_types(self):
"""get_meta_doc describes array items oneOf declarations in type."""
@@ -504,9 +677,41 @@ class GetSchemaDocTest(CiTestCase):
}
}
}
- self.assertIn(
- "**prop1:** (array of (string)/(integer))",
- get_meta_doc(self.meta, schema),
+ assert "**prop1:** (array of (string/integer))" in get_meta_doc(
+ self.meta, schema
+ )
+
+ def test_get_meta_doc_handles_types_as_list(self):
+ """get_meta_doc renders types which have a list value."""
+ schema = {
+ "properties": {
+ "prop1": {
+ "type": ["boolean", "array"],
+ "items": {
+ "oneOf": [{"type": "string"}, {"type": "integer"}]
+ },
+ }
+ }
+ }
+ assert (
+ "**prop1:** (boolean/array of (string/integer))"
+ in get_meta_doc(self.meta, schema)
+ )
+
+ def test_get_meta_doc_handles_flattening_defs(self):
+ """get_meta_doc renders $defs."""
+ schema = {
+ "$defs": {
+ "prop1object": {
+ "type": "object",
+ "properties": {"subprop": {"type": "string"}},
+ }
+ },
+ "properties": {"prop1": {"$ref": "#/$defs/prop1object"}},
+ }
+ assert (
+ "**prop1:** (object)\n\n **subprop:** (string)\n"
+ in get_meta_doc(self.meta, schema)
)
def test_get_meta_doc_handles_string_examples(self):
@@ -527,21 +732,21 @@ class GetSchemaDocTest(CiTestCase):
},
}
)
- self.assertIn(
+ assert (
dedent(
"""
- **Config schema**:
- **prop1:** (array of integer) prop-description
+ **Config schema**:
+ **prop1:** (array of integer) prop-description
- **Examples**::
+ **Examples**::
- ex1:
- [don't, expand, "this"]
- # --- Example2 ---
- ex2: true
+ ex1:
+ [don't, expand, "this"]
+ # --- Example2 ---
+ ex2: true
"""
- ),
- get_meta_doc(self.meta, full_schema),
+ )
+ in get_meta_doc(self.meta, full_schema)
)
def test_get_meta_doc_properly_parse_description(self):
@@ -567,21 +772,21 @@ class GetSchemaDocTest(CiTestCase):
}
}
- self.assertIn(
+ assert (
dedent(
"""
- **Config schema**:
- **p1:** (string) This item has the following options:
+ **Config schema**:
+ **p1:** (string) This item has the following options:
- - option1
- - option2
- - option3
+ - option1
+ - option2
+ - option3
- The default value is option1
+ The default value is option1
- """
- ),
- get_meta_doc(self.meta, schema),
+ """
+ )
+ in get_meta_doc(self.meta, schema)
)
def test_get_meta_doc_raises_key_errors(self):
@@ -599,9 +804,9 @@ class GetSchemaDocTest(CiTestCase):
for key in self.meta:
invalid_meta = copy(self.meta)
invalid_meta.pop(key)
- with self.assertRaises(KeyError) as context_mgr:
+ with pytest.raises(KeyError) as context_mgr:
get_meta_doc(invalid_meta, schema)
- self.assertIn(key, str(context_mgr.exception))
+ assert key in str(context_mgr.value)
def test_label_overrides_property_name(self):
"""get_meta_doc overrides property name with label."""
@@ -636,20 +841,19 @@ class GetSchemaDocTest(CiTestCase):
assert "**label1:** (string)" in meta_doc
assert "**label2:** (string" in meta_doc
assert "**prop_no_label:** (string)" in meta_doc
- assert "Each item in **array_label** list" in meta_doc
+ assert "Each object in **array_label** list" in meta_doc
assert "prop1" not in meta_doc
assert ".*" not in meta_doc
-class AnnotatedCloudconfigFileTest(CiTestCase):
- maxDiff = None
-
+class TestAnnotatedCloudconfigFile:
def test_annotated_cloudconfig_file_no_schema_errors(self):
"""With no schema_errors, print the original content."""
content = b"ntp:\n pools: [ntp1.pools.com]\n"
- self.assertEqual(
- content, annotated_cloudconfig_file({}, content, schema_errors=[])
+ parse_cfg, schemamarks = load_with_marks(content)
+ assert content == annotated_cloudconfig_file(
+ parse_cfg, content, schema_errors=[], schemamarks=schemamarks
)
def test_annotated_cloudconfig_file_with_non_dict_cloud_config(self):
@@ -668,13 +872,11 @@ class AnnotatedCloudconfigFileTest(CiTestCase):
"# E1: Cloud-config is not a YAML dict.\n\n",
]
)
- self.assertEqual(
- expected,
- annotated_cloudconfig_file(
- None,
- content,
- schema_errors=[("", "None is not of type 'object'")],
- ),
+ assert expected == annotated_cloudconfig_file(
+ None,
+ content,
+ schema_errors=[("", "None is not of type 'object'")],
+ schemamarks={},
)
def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self):
@@ -701,15 +903,14 @@ class AnnotatedCloudconfigFileTest(CiTestCase):
"""
)
- parsed_config = safe_load(content[13:])
+ parsed_config, schemamarks = load_with_marks(content[13:])
schema_errors = [
("ntp", "Some type error"),
("ntp.pools.0", "-99 is not a string"),
("ntp.pools.1", "75 is not a string"),
]
- self.assertEqual(
- expected,
- annotated_cloudconfig_file(parsed_config, content, schema_errors),
+ assert expected == annotated_cloudconfig_file(
+ parsed_config, content, schema_errors, schemamarks=schemamarks
)
def test_annotated_cloudconfig_file_annotates_separate_line_items(self):
@@ -732,14 +933,13 @@ class AnnotatedCloudconfigFileTest(CiTestCase):
- 75 # E2
"""
)
- parsed_config = safe_load(content[13:])
+ parsed_config, schemamarks = load_with_marks(content[13:])
schema_errors = [
("ntp.pools.0", "-99 is not a string"),
("ntp.pools.1", "75 is not a string"),
]
- self.assertIn(
- expected,
- annotated_cloudconfig_file(parsed_config, content, schema_errors),
+ assert expected in annotated_cloudconfig_file(
+ parsed_config, content, schema_errors, schemamarks=schemamarks
)
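
These conversions show the new annotate plumbing: safeyaml.load_with_marks() returns both the parsed config and a schemamarks dict mapping dotted config paths to source line numbers, which annotated_cloudconfig_file() needs to place its "# E1"-style comments on the offending lines (replacing the removed _schemapath_for_cloudconfig()). A sketch of the call shape, using only the signatures visible in this patch:

    from cloudinit.config.schema import annotated_cloudconfig_file
    from cloudinit.safeyaml import load_with_marks

    content = b"ntp:\n  pools: [-99]\n"
    parsed, schemamarks = load_with_marks(content)  # e.g. {"ntp": 1, ...}
    print(
        annotated_cloudconfig_file(
            parsed,
            content,
            schema_errors=[("ntp.pools.0", "-99 is not a string")],
            schemamarks=schemamarks,
        )
    )
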
@@ -911,4 +1111,14 @@ class TestStrictMetaschema:
validate_cloudconfig_metaschema(validator, schema, throw=False)
-# vi: ts=4 expandtab syntax=python
+class TestMeta:
+ def test_valid_meta_for_every_module(self):
+ all_distros = {
+ name for distro in OSFAMILIES.values() for name in distro
+ }
+ all_distros.add("all")
+ for module in get_modules():
+ assert "frequency" in module.meta
+ assert "distros" in module.meta
+ assert {module.meta["frequency"]}.issubset(FREQUENCIES)
+ assert set(module.meta["distros"]).issubset(all_distros)
diff --git a/tests/unittests/distros/test_generic.py b/tests/unittests/distros/test_generic.py
index 93c5395c..fedc7300 100644
--- a/tests/unittests/distros/test_generic.py
+++ b/tests/unittests/distros/test_generic.py
@@ -187,12 +187,14 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
locale = d.get_locale()
self.assertEqual("C.UTF-8", locale)
- def test_get_locale_rhel(self):
- """Test rhel distro returns NotImplementedError exception"""
+ @mock.patch("cloudinit.distros.rhel.Distro._read_system_locale")
+ def test_get_locale_rhel(self, m_locale):
+ """Test rhel distro returns locale set to C.UTF-8"""
+ m_locale.return_value = "C.UTF-8"
cls = distros.fetch("rhel")
d = cls("rhel", {}, None)
- with self.assertRaises(NotImplementedError):
- d.get_locale()
+ locale = d.get_locale()
+ self.assertEqual("C.UTF-8", locale)
def test_expire_passwd_uses_chpasswd(self):
"""Test ubuntu.expire_passwd uses the passwd command."""
diff --git a/tests/unittests/distros/test_networking.py b/tests/unittests/distros/test_networking.py
index 274647cb..f56b34ad 100644
--- a/tests/unittests/distros/test_networking.py
+++ b/tests/unittests/distros/test_networking.py
@@ -1,11 +1,14 @@
# See https://docs.pytest.org/en/stable/example
# /parametrize.html#parametrizing-conditional-raising
+
+import textwrap
from contextlib import ExitStack as does_not_raise
from unittest import mock
import pytest
from cloudinit import net
+from cloudinit import safeyaml as yaml
from cloudinit.distros.networking import (
BSDNetworking,
LinuxNetworking,
@@ -23,6 +26,9 @@ def generic_networking_cls():
"""
class TestNetworking(Networking):
+ def apply_network_config_names(self, *args, **kwargs):
+ raise NotImplementedError
+
def is_physical(self, *args, **kwargs):
raise NotImplementedError
@@ -229,3 +235,115 @@ class TestNetworkingWaitForPhysDevs:
5 * len(wait_for_physdevs_netcfg["ethernets"])
== m_settle.call_count
)
+
+
+class TestLinuxNetworkingApplyNetworkCfgNames:
+ V1_CONFIG = textwrap.dedent(
+ """\
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: "52:54:00:12:34:00"
+ subnets:
+ - type: static
+ address: 10.0.2.15
+ netmask: 255.255.255.0
+ gateway: 10.0.2.2
+ """
+ )
+ V2_CONFIG = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ macaddress: "52:54:00:12:34:00"
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ set-name: interface0
+ """
+ )
+
+ V2_CONFIG_NO_SETNAME = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ macaddress: "52:54:00:12:34:00"
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ """
+ )
+
+ V2_CONFIG_NO_MAC = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ driver: virtio-net
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ set-name: interface0
+ """
+ )
+
+ @pytest.mark.parametrize(
+ ["config_attr"],
+ [
+ pytest.param("V1_CONFIG", id="v1"),
+ pytest.param("V2_CONFIG", id="v2"),
+ ],
+ )
+ @mock.patch("cloudinit.net.device_devid")
+ @mock.patch("cloudinit.net.device_driver")
+ def test_apply_renames(
+ self,
+ m_device_driver,
+ m_device_devid,
+ config_attr: str,
+ ):
+ networking = LinuxNetworking()
+ m_device_driver.return_value = "virtio_net"
+ m_device_devid.return_value = "0x15d8"
+ netcfg = yaml.load(getattr(self, config_attr))
+
+ with mock.patch.object(
+ networking, "_rename_interfaces"
+ ) as m_rename_interfaces:
+ networking.apply_network_config_names(netcfg)
+
+ assert (
+ mock.call(
+ [["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"]]
+ )
+ == m_rename_interfaces.call_args_list[-1]
+ )
+
+ @pytest.mark.parametrize(
+ ["config_attr"],
+ [
+ pytest.param("V2_CONFIG_NO_SETNAME", id="without_setname"),
+ pytest.param("V2_CONFIG_NO_MAC", id="without_mac"),
+ ],
+ )
+ def test_apply_v2_renames_skips_without_setname_or_mac(
+ self, config_attr: str
+ ):
+ networking = LinuxNetworking()
+ netcfg = yaml.load(getattr(self, config_attr))
+ with mock.patch.object(
+ networking, "_rename_interfaces"
+ ) as m_rename_interfaces:
+ networking.apply_network_config_names(netcfg)
+ m_rename_interfaces.assert_called_with([])
+
+ def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
+ networking = LinuxNetworking()
+ with pytest.raises(RuntimeError):
+ networking.apply_network_config_names(yaml.load("version: 3"))
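
The rewritten tests intercept the instance's _rename_interfaces rather than a module-level function, since apply_network_config_names is now a method on the Networking classes. mock.patch.object is the idiomatic tool for that; a generic sketch of the pattern:

    from unittest import mock

    class Greeter:
        def _emit(self, msg):
            print(msg)

        def hello(self, name):
            self._emit(f"hello {name}")

    g = Greeter()
    with mock.patch.object(g, "_emit") as m_emit:
        g.hello("world")  # _emit is intercepted; nothing is printed

    m_emit.assert_called_with("hello world")
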
diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py
index 876873d5..08ca001a 100644
--- a/tests/unittests/net/test_dhcp.py
+++ b/tests/unittests/net/test_dhcp.py
@@ -5,10 +5,14 @@ import signal
from textwrap import dedent
import httpretty
+import pytest
import cloudinit.net as net
from cloudinit.net.dhcp import (
InvalidDHCPLeaseFileError,
+ NoDHCPLeaseError,
+ NoDHCPLeaseInterfaceError,
+ NoDHCPLeaseMissingDhclientError,
dhcp_discovery,
maybe_perform_dhcp_discovery,
networkd_load_leases,
@@ -334,15 +338,21 @@ class TestDHCPDiscoveryClean(CiTestCase):
def test_no_fallback_nic_found(self, m_fallback_nic):
"""Log and do nothing when nic is absent and no fallback is found."""
m_fallback_nic.return_value = None # No fallback nic found
- self.assertEqual([], maybe_perform_dhcp_discovery())
+
+ with pytest.raises(NoDHCPLeaseInterfaceError):
+ maybe_perform_dhcp_discovery()
+
self.assertIn(
"Skip dhcp_discovery: Unable to find fallback nic.",
self.logs.getvalue(),
)
- def test_provided_nic_does_not_exist(self):
+ @mock.patch("cloudinit.net.dhcp.find_fallback_nic", return_value=None)
+ def test_provided_nic_does_not_exist(self, m_fallback_nic):
"""When the provided nic doesn't exist, log a message and no-op."""
- self.assertEqual([], maybe_perform_dhcp_discovery("idontexist"))
+ with pytest.raises(NoDHCPLeaseInterfaceError):
+ maybe_perform_dhcp_discovery("idontexist")
+
self.assertIn(
"Skip dhcp_discovery: nic idontexist not found in get_devicelist.",
self.logs.getvalue(),
@@ -354,7 +364,10 @@ class TestDHCPDiscoveryClean(CiTestCase):
"""When dhclient doesn't exist in the OS, log the issue and no-op."""
m_fallback.return_value = "eth9"
m_which.return_value = None # dhclient isn't found
- self.assertEqual([], maybe_perform_dhcp_discovery())
+
+ with pytest.raises(NoDHCPLeaseMissingDhclientError):
+ maybe_perform_dhcp_discovery()
+
self.assertIn(
"Skip dhclient configuration: No dhclient command found.",
self.logs.getvalue(),
@@ -794,4 +807,50 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
m_dhcp.called_once_with()
+@pytest.mark.parametrize(
+ "error_class",
+ [
+ NoDHCPLeaseInterfaceError,
+ NoDHCPLeaseMissingDhclientError,
+ ],
+)
+class TestEphemeralDhcpLeaseErrors:
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_obtain_lease_raises_error(self, m_dhcp, error_class):
+ m_dhcp.side_effect = [error_class()]
+
+ with pytest.raises(error_class):
+ net.dhcp.EphemeralDHCPv4().obtain_lease()
+
+ assert len(m_dhcp.mock_calls) == 1
+
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_obtain_lease_umbrella_error(self, m_dhcp, error_class):
+ m_dhcp.side_effect = [error_class()]
+ with pytest.raises(NoDHCPLeaseError):
+ net.dhcp.EphemeralDHCPv4().obtain_lease()
+
+ assert len(m_dhcp.mock_calls) == 1
+
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_ctx_mgr_raises_error(self, m_dhcp, error_class):
+ m_dhcp.side_effect = [error_class()]
+
+ with pytest.raises(error_class):
+ with net.dhcp.EphemeralDHCPv4():
+ pass
+
+ assert len(m_dhcp.mock_calls) == 1
+
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_ctx_mgr_umbrella_error(self, m_dhcp, error_class):
+ m_dhcp.side_effect = [error_class()]
+ with pytest.raises(NoDHCPLeaseError):
+ with net.dhcp.EphemeralDHCPv4():
+ pass
+
+ assert len(m_dhcp.mock_calls) == 1
+
+
# vi: ts=4 expandtab
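
The new tests exercise both layers of the DHCP error handling: each specific failure (no usable interface, no dhclient) propagates as its own class and is also catchable as NoDHCPLeaseError, which the tests imply is their common base. The pattern in isolation:

    class NoDHCPLeaseError(Exception):
        """Umbrella: no DHCP lease could be obtained."""

    class NoDHCPLeaseInterfaceError(NoDHCPLeaseError):
        """No suitable network interface was found."""

    class NoDHCPLeaseMissingDhclientError(NoDHCPLeaseError):
        """No dhclient executable is available."""

    try:
        raise NoDHCPLeaseInterfaceError()
    except NoDHCPLeaseError:
        # Callers that only care whether a lease was obtained catch the
        # base class; callers needing the cause catch the subclass.
        pass
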
diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py
index 18b3fe59..768cc112 100644
--- a/tests/unittests/net/test_init.py
+++ b/tests/unittests/net/test_init.py
@@ -4,7 +4,8 @@ import copy
import errno
import ipaddress
import os
-import textwrap
+from pathlib import Path
+from typing import Optional
from unittest import mock
import httpretty
@@ -12,7 +13,6 @@ import pytest
import requests
import cloudinit.net as net
-from cloudinit import safeyaml as yaml
from cloudinit.subp import ProcessExecutionError
from cloudinit.util import ensure_file, write_file
from tests.unittests.helpers import CiTestCase, HttprettyTestCase
@@ -390,6 +390,163 @@ class TestNetFindFallBackNic(CiTestCase):
self.assertEqual("eth1", net.find_fallback_nic())
+class TestNetFindCandidateNics:
+ def create_fake_interface(
+ self,
+ name: str,
+ address: Optional[str] = "aa:bb:cc:aa:bb:cc",
+ carrier: bool = True,
+ bonding: bool = False,
+ dormant: bool = False,
+ driver: str = "fakenic",
+ bridge: bool = False,
+ failover_standby: bool = False,
+ operstate: Optional[str] = None,
+ ):
+ interface_path = self.sys_path / name
+ interface_path.mkdir(parents=True)
+
+ if address is not None:
+ (interface_path / "address").write_text(str(address))
+
+ if carrier:
+ (interface_path / "carrier").write_text("1")
+ else:
+ (interface_path / "carrier").write_text("0")
+
+ if bonding:
+ (interface_path / "bonding").write_text("1")
+
+ if bridge:
+ (interface_path / "bridge").write_text("1")
+
+ if dormant:
+ (interface_path / "dormant").write_text("1")
+ else:
+ (interface_path / "dormant").write_text("0")
+
+ if operstate:
+ (interface_path / "operstate").write_text(operstate)
+
+ device_path = interface_path / "device"
+ device_path.mkdir()
+ if failover_standby:
+ driver = "virtio_net"
+ (interface_path / "master").symlink_to(os.path.join("..", name))
+ (device_path / "features").write_text("1" * 64)
+
+ if driver:
+ (device_path / driver).write_text(driver)
+ (device_path / "driver").symlink_to(driver)
+
+ @pytest.fixture(autouse=True)
+ def setup(self, monkeypatch, tmpdir):
+ self.sys_path = Path(tmpdir) / "sys"
+ monkeypatch.setattr(
+ net, "get_sys_class_path", lambda: str(self.sys_path) + "/"
+ )
+ monkeypatch.setattr(
+ net.util,
+ "is_container",
+ lambda: False,
+ )
+ monkeypatch.setattr(net.util, "udevadm_settle", lambda: None)
+
+ def test_ignored_interfaces(self):
+ self.create_fake_interface(
+ name="ethNoCarrierDormantOperstateIgnored",
+ carrier=False,
+ )
+ self.create_fake_interface(
+ name="ethWithoutMacIgnored",
+ address=None,
+ )
+ self.create_fake_interface(name="vethIgnored", carrier=1)
+ self.create_fake_interface(
+ name="bondIgnored",
+ bonding=True,
+ )
+ self.create_fake_interface(
+ name="bridgeIgnored",
+ bridge=True,
+ )
+ self.create_fake_interface(
+ name="failOverIgnored",
+ failover_standby=True,
+ )
+ self.create_fake_interface(
+ name="TestingOperStateIgnored",
+ carrier=False,
+ operstate="testing",
+ )
+ self.create_fake_interface(
+ name="blacklistedDriverIgnored",
+ driver="bad",
+ )
+
+ assert (
+ net.find_candidate_nics_on_linux(blacklist_drivers=["bad"]) == []
+ )
+
+ def test_carrier_preferred(self):
+ self.create_fake_interface(name="eth0", carrier=False, dormant=True)
+ self.create_fake_interface(name="eth1")
+
+ assert net.find_candidate_nics_on_linux() == ["eth1", "eth0"]
+
+ def test_natural_sort(self):
+ self.create_fake_interface(name="a")
+ self.create_fake_interface(name="a1")
+ self.create_fake_interface(name="a2")
+ self.create_fake_interface(name="a10")
+ self.create_fake_interface(name="b1")
+
+ assert net.find_candidate_nics_on_linux() == [
+ "a",
+ "a1",
+ "a2",
+ "a10",
+ "b1",
+ ]
+
+ def test_eth0_preferred_with_carrier(self):
+ self.create_fake_interface(name="abc0")
+ self.create_fake_interface(name="eth0")
+
+ assert net.find_candidate_nics_on_linux() == ["eth0", "abc0"]
+
+ @pytest.mark.parametrize("dormant", [False, True])
+ @pytest.mark.parametrize(
+ "operstate", ["dormant", "down", "lowerlayerdown", "unknown"]
+ )
+ def test_eth0_preferred_after_carrier(self, dormant, operstate):
+ self.create_fake_interface(name="xeth10")
+ self.create_fake_interface(name="eth", carrier=False, dormant=True)
+ self.create_fake_interface(
+ name="eth0",
+ carrier=False,
+ dormant=dormant,
+ operstate=operstate,
+ )
+ self.create_fake_interface(name="eth1", carrier=False, dormant=True)
+ self.create_fake_interface(
+ name="eth2",
+ carrier=False,
+ operstate=operstate,
+ )
+
+ assert net.find_candidate_nics_on_linux() == [
+ "xeth10",
+ "eth0",
+ "eth",
+ "eth1",
+ "eth2",
+ ]
+
+ def test_no_nics(self):
+ assert net.find_candidate_nics_on_linux() == []
+
+
class TestGetDeviceList(CiTestCase):
def setUp(self):
super(TestGetDeviceList, self).setUp()
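
test_natural_sort above expects a10 to follow a2, i.e. digit runs compared numerically rather than lexically. One minimal sketch of such a sort key (cloud-init's actual helper may differ):

    import re

    def natural_sort_key(name):
        # Split "a10" into ["a", "10", ""] so digit runs compare as ints.
        return [
            int(tok) if tok.isdigit() else tok
            for tok in re.split(r"(\d+)", name)
        ]

    print(sorted(["a10", "b1", "a1", "a", "a2"], key=natural_sort_key))
    # ['a', 'a1', 'a2', 'a10', 'b1']
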
@@ -1037,105 +1194,6 @@ class TestEphemeralIPV4Network(CiTestCase):
m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls)
-class TestApplyNetworkCfgNames(CiTestCase):
- V1_CONFIG = textwrap.dedent(
- """\
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: "52:54:00:12:34:00"
- subnets:
- - type: static
- address: 10.0.2.15
- netmask: 255.255.255.0
- gateway: 10.0.2.2
- """
- )
- V2_CONFIG = textwrap.dedent(
- """\
- version: 2
- ethernets:
- interface0:
- match:
- macaddress: "52:54:00:12:34:00"
- addresses:
- - 10.0.2.15/24
- gateway4: 10.0.2.2
- set-name: interface0
- """
- )
-
- V2_CONFIG_NO_SETNAME = textwrap.dedent(
- """\
- version: 2
- ethernets:
- interface0:
- match:
- macaddress: "52:54:00:12:34:00"
- addresses:
- - 10.0.2.15/24
- gateway4: 10.0.2.2
- """
- )
-
- V2_CONFIG_NO_MAC = textwrap.dedent(
- """\
- version: 2
- ethernets:
- interface0:
- match:
- driver: virtio-net
- addresses:
- - 10.0.2.15/24
- gateway4: 10.0.2.2
- set-name: interface0
- """
- )
-
- @mock.patch("cloudinit.net.device_devid")
- @mock.patch("cloudinit.net.device_driver")
- @mock.patch("cloudinit.net._rename_interfaces")
- def test_apply_v1_renames(
- self, m_rename_interfaces, m_device_driver, m_device_devid
- ):
- m_device_driver.return_value = "virtio_net"
- m_device_devid.return_value = "0x15d8"
-
- net.apply_network_config_names(yaml.load(self.V1_CONFIG))
-
- call = ["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"]
- m_rename_interfaces.assert_called_with([call])
-
- @mock.patch("cloudinit.net.device_devid")
- @mock.patch("cloudinit.net.device_driver")
- @mock.patch("cloudinit.net._rename_interfaces")
- def test_apply_v2_renames(
- self, m_rename_interfaces, m_device_driver, m_device_devid
- ):
- m_device_driver.return_value = "virtio_net"
- m_device_devid.return_value = "0x15d8"
-
- net.apply_network_config_names(yaml.load(self.V2_CONFIG))
-
- call = ["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"]
- m_rename_interfaces.assert_called_with([call])
-
- @mock.patch("cloudinit.net._rename_interfaces")
- def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces):
- net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME))
- m_rename_interfaces.assert_called_with([])
-
- @mock.patch("cloudinit.net._rename_interfaces")
- def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces):
- net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC))
- m_rename_interfaces.assert_called_with([])
-
- def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
- with self.assertRaises(RuntimeError):
- net.apply_network_config_names(yaml.load("version: 3"))
-
-
class TestHasURLConnectivity(HttprettyTestCase):
def setUp(self):
super(TestHasURLConnectivity, self).setUp()
@@ -1689,7 +1747,9 @@ class TestIsIpAddress:
(
(ValueError, False),
(lambda _: ipaddress.IPv4Address("192.168.0.1"), True),
+ (lambda _: ipaddress.IPv4Address("192.168.0.1/24"), False),
(lambda _: ipaddress.IPv6Address("2001:db8::"), True),
+ (lambda _: ipaddress.IPv6Address("2001:db8::/48"), False),
),
)
def test_is_ip_address(self, ip_address_side_effect, expected_return):
@@ -1731,4 +1791,33 @@ class TestIsIpv4Address:
assert [expected_call] == m_ipv4address.call_args_list
-# vi: ts=4 expandtab
+class TestIsIpNetwork:
+ """Tests for net.is_ip_network() and related functions."""
+
+ @pytest.mark.parametrize(
+ "func,arg,expected_return",
+ (
+ (net.is_ip_network, "192.168.1.1", True),
+ (net.is_ip_network, "192.168.1.1/24", True),
+ (net.is_ip_network, "192.168.1.1/32", True),
+ (net.is_ip_network, "192.168.1.1/33", False),
+ (net.is_ip_network, "2001:67c:1", False),
+ (net.is_ip_network, "2001:67c:1/32", False),
+ (net.is_ip_network, "2001:67c::", True),
+ (net.is_ip_network, "2001:67c::/32", True),
+ (net.is_ipv4_network, "192.168.1.1", True),
+ (net.is_ipv4_network, "192.168.1.1/24", True),
+ (net.is_ipv4_network, "2001:67c::", False),
+ (net.is_ipv4_network, "2001:67c::/32", False),
+ (net.is_ipv6_network, "192.168.1.1", False),
+ (net.is_ipv6_network, "192.168.1.1/24", False),
+ (net.is_ipv6_network, "2001:67c:1", False),
+ (net.is_ipv6_network, "2001:67c:1/32", False),
+ (net.is_ipv6_network, "2001:67c::", True),
+ (net.is_ipv6_network, "2001:67c::/32", True),
+ (net.is_ipv6_network, "2001:67c::/129", False),
+ (net.is_ipv6_network, "2001:67c::/128", True),
+ ),
+ )
+ def test_is_ip_network(self, func, arg, expected_return):
+ assert func(arg) == expected_return
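
The table pins down the expected semantics: a bare host address counts as a single-address network, host bits below the prefix are tolerated, and out-of-range prefixes or shorthand like 2001:67c:1 are rejected. A plausible implementation consistent with every row, built on the standard library (not necessarily cloud-init's):

    import ipaddress

    def is_ip_network(value) -> bool:
        try:
            # strict=False accepts host bits, e.g. "192.168.1.1/24";
            # a bare address yields a /32 (or /128) network.
            ipaddress.ip_network(value, strict=False)
            return True
        except ValueError:
            return False

    assert is_ip_network("192.168.1.1/24")
    assert not is_ip_network("192.168.1.1/33")
    assert not is_ip_network("2001:67c:1")
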
diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py
index 471d969a..ec21d007 100644
--- a/tests/unittests/net/test_network_state.py
+++ b/tests/unittests/net/test_network_state.py
@@ -217,6 +217,3 @@ class TestNetworkStateHelperFunctions(CiTestCase):
expected = 48
prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value)
assert prefix_value == expected
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/runs/test_merge_run.py b/tests/unittests/runs/test_merge_run.py
index 1b1b5595..5f217a3d 100644
--- a/tests/unittests/runs/test_merge_run.py
+++ b/tests/unittests/runs/test_merge_run.py
@@ -5,6 +5,7 @@ import shutil
import tempfile
from cloudinit import safeyaml, stages, util
+from cloudinit.config.modules import Modules
from cloudinit.settings import PER_INSTANCE
from tests.unittests import helpers
@@ -49,7 +50,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase):
self.assertEqual(1, len(mirrors))
mirror = mirrors[0]
self.assertEqual(mirror["arches"], ["i386", "amd64", "blah"])
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertTrue(os.path.exists("/etc/blah.ini"))
diff --git a/tests/unittests/runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py
index 38cf9494..2b51117c 100644
--- a/tests/unittests/runs/test_simple_run.py
+++ b/tests/unittests/runs/test_simple_run.py
@@ -4,6 +4,7 @@ import copy
import os
from cloudinit import safeyaml, stages, util
+from cloudinit.config.modules import Modules
from cloudinit.settings import PER_INSTANCE
from tests.unittests import helpers
@@ -71,7 +72,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertTrue(os.path.exists("/etc/blah.ini"))
@@ -99,7 +100,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertIn(
@@ -128,7 +129,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertIn("runcmd", which_ran)
@@ -163,7 +164,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertIn("spacewalk", which_ran)
@@ -197,7 +198,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertEqual([], which_ran)
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/sources/helpers/test_cloudsigma.py
index 109e0208..3c687388 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/sources/helpers/test_cloudsigma.py
@@ -1,6 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.cs_utils import Cepko
+from cloudinit.sources.helpers.cloudsigma import Cepko
from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/sources/helpers/test_ec2.py
index f447d295..77e7c7b6 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/sources/helpers/test_ec2.py
@@ -2,8 +2,8 @@
import httpretty as hp
-from cloudinit import ec2_utils as eu
from cloudinit import url_helper as uh
+from cloudinit.sources.helpers import ec2
from tests.unittests import helpers
@@ -17,7 +17,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
body="stuff",
status=200,
)
- userdata = eu.get_instance_userdata(self.VERSION)
+ userdata = ec2.get_instance_userdata(self.VERSION)
self.assertEqual("stuff", userdata.decode("utf-8"))
def test_userdata_fetch_fail_not_found(self):
@@ -26,7 +26,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
"http://169.254.169.254/%s/user-data" % (self.VERSION),
status=404,
)
- userdata = eu.get_instance_userdata(self.VERSION, retries=0)
+ userdata = ec2.get_instance_userdata(self.VERSION, retries=0)
self.assertEqual("", userdata)
def test_userdata_fetch_fail_server_dead(self):
@@ -35,7 +35,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
"http://169.254.169.254/%s/user-data" % (self.VERSION),
status=500,
)
- userdata = eu.get_instance_userdata(self.VERSION, retries=0)
+ userdata = ec2.get_instance_userdata(self.VERSION, retries=0)
self.assertEqual("", userdata)
def test_userdata_fetch_fail_server_not_found(self):
@@ -44,7 +44,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
"http://169.254.169.254/%s/user-data" % (self.VERSION),
status=404,
)
- userdata = eu.get_instance_userdata(self.VERSION)
+ userdata = ec2.get_instance_userdata(self.VERSION)
self.assertEqual("", userdata)
def test_metadata_fetch_no_keys(self):
@@ -73,7 +73,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
status=200,
body="1",
)
- md = eu.get_instance_metadata(self.VERSION, retries=0)
+ md = ec2.get_instance_metadata(self.VERSION, retries=0)
self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
self.assertEqual(md["instance-id"], "123")
self.assertEqual(md["ami-launch-index"], "1")
@@ -110,7 +110,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
status=200,
body="ssh-rsa AAAA.....wZEf my-public-key",
)
- md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ md = ec2.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
self.assertEqual(md["instance-id"], "123")
self.assertEqual(1, len(md["public-keys"]))
@@ -153,7 +153,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
status=200,
body="ssh-rsa AAAA.....wZEf my-other-key",
)
- md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ md = ec2.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
self.assertEqual(md["instance-id"], "123")
self.assertEqual(2, len(md["public-keys"]))
@@ -198,7 +198,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
status=200,
body="sdc",
)
- md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ md = ec2.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
self.assertEqual(md["instance-id"], "123")
bdm = md["block-device-mapping"]
@@ -266,7 +266,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
status=200,
body="2016-10-28T00:00:34Z",
)
- md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ md = ec2.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
self.assertEqual(md["instance-id"], "i-0123451689abcdef0")
iam = md["iam"]
self.assertEqual(1, len(iam))
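
The ec2_utils module moved the same way, to cloudinit.sources.helpers.ec2. A self-contained sketch of the fetch pattern these tests exercise, mocked with httpretty as they are; the "latest" version string is an arbitrary choice here:

    import httpretty as hp

    from cloudinit.sources.helpers import ec2  # was: cloudinit.ec2_utils

    @hp.activate
    def fetch_fake_userdata() -> bytes:
        hp.register_uri(
            hp.GET,
            "http://169.254.169.254/latest/user-data",
            body="stuff",
            status=200,
        )
        # Returns bytes, as the decode("utf-8") in the first test implies.
        return ec2.get_instance_userdata("latest")

    assert fetch_fake_userdata() == b"stuff"
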
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index 551de59f..b7dae873 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -7,13 +7,15 @@ import logging
import os
import stat
import xml.etree.ElementTree as ET
+from pathlib import Path
import httpretty
import pytest
import requests
import yaml
-from cloudinit import distros, helpers, url_helper
+from cloudinit import distros, helpers, subp, url_helper
+from cloudinit.net import dhcp
from cloudinit.sources import UNSET
from cloudinit.sources import DataSourceAzure as dsaz
from cloudinit.sources import InvalidMetaDataException
@@ -42,13 +44,23 @@ MOCKPATH = "cloudinit.sources.DataSourceAzure."
@pytest.fixture
-def azure_ds(paths):
+def azure_ds(patched_data_dir_path, paths):
"""Provide DataSourceAzure instance with mocks for minimal test case."""
with mock.patch(MOCKPATH + "_is_platform_viable", return_value=True):
yield dsaz.DataSourceAzure(sys_cfg={}, distro=mock.Mock(), paths=paths)
@pytest.fixture
+def mock_wrapping_setup_ephemeral_networking(azure_ds):
+ with mock.patch.object(
+ azure_ds,
+ "_setup_ephemeral_networking",
+ wraps=azure_ds._setup_ephemeral_networking,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
def mock_azure_helper_readurl():
with mock.patch(
"cloudinit.sources.helpers.azure.url_helper.readurl", autospec=True
@@ -75,6 +87,15 @@ def mock_azure_report_failure_to_fabric():
@pytest.fixture
+def mock_time():
+ with mock.patch(
+ MOCKPATH + "time",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
def mock_dmi_read_dmi_data():
def fake_read(key: str) -> str:
if key == "system-uuid":
@@ -90,12 +111,21 @@ def mock_dmi_read_dmi_data():
@pytest.fixture
+def mock_ephemeral_dhcp_v4():
+ with mock.patch(
+ MOCKPATH + "EphemeralDHCPv4",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
def mock_net_dhcp_maybe_perform_dhcp_discovery():
with mock.patch(
"cloudinit.net.dhcp.maybe_perform_dhcp_discovery",
return_value=[
{
- "unknown-245": "aa:bb:cc:dd",
+ "unknown-245": "0a:0b:0c:0d",
"interface": "ethBoot0",
"fixed-address": "192.168.2.9",
"routers": "192.168.2.1",
@@ -153,12 +183,27 @@ def mock_readurl():
@pytest.fixture
+def mock_report_diagnostic_event():
+ with mock.patch(MOCKPATH + "report_diagnostic_event") as m:
+ yield m
+
+
+@pytest.fixture
def mock_requests_session_request():
with mock.patch("requests.Session.request", autospec=True) as m:
yield m
@pytest.fixture
+def mock_sleep():
+ with mock.patch(
+ MOCKPATH + "sleep",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
def mock_subp_subp():
with mock.patch(MOCKPATH + "subp.subp", side_effect=[]) as m:
yield m
@@ -214,6 +259,34 @@ def mock_util_write_file():
yield m
+@pytest.fixture
+def patched_data_dir_path(tmpdir):
+ data_dir_path = Path(tmpdir) / "data_dir"
+ data_dir_path.mkdir()
+ data_dir = str(data_dir_path)
+
+ with mock.patch(MOCKPATH + "AGENT_SEED_DIR", data_dir):
+ with mock.patch.dict(dsaz.BUILTIN_DS_CONFIG, {"data_dir": data_dir}):
+ yield data_dir_path
+
+
+@pytest.fixture
+def patched_markers_dir_path(tmpdir):
+ patched_markers_dir_path = Path(tmpdir) / "markers"
+ patched_markers_dir_path.mkdir()
+
+ yield patched_markers_dir_path
+
+
+@pytest.fixture
+def patched_reported_ready_marker_path(patched_markers_dir_path):
+ reported_ready_marker = patched_markers_dir_path / "reported_ready"
+ with mock.patch(
+ MOCKPATH + "REPORTED_READY_MARKER_FILE", str(reported_ready_marker)
+ ):
+ yield reported_ready_marker
+
+
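
Note the use of mock.patch.dict for BUILTIN_DS_CONFIG in patched_data_dir_path above: unlike plain mock.patch, it mutates the existing mapping in place and restores the original entries on exit. In miniature:

    from unittest import mock

    CONFIG = {"data_dir": "/var/lib/waagent"}

    with mock.patch.dict(CONFIG, {"data_dir": "/tmp/test-data"}):
        assert CONFIG["data_dir"] == "/tmp/test-data"
    assert CONFIG["data_dir"] == "/var/lib/waagent"  # restored on exit
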
def construct_valid_ovf_env(
data=None, pubkeys=None, userdata=None, platform_settings=None
):
@@ -1859,7 +1932,7 @@ scbus-1 on xpt0 bus 0
test_msg = "Test report failure description message"
self.assertTrue(dsrc._report_failure(description=test_msg))
self.m_report_failure_to_fabric.assert_called_once_with(
- dhcp_opts=mock.ANY, description=test_msg
+ endpoint="168.63.129.16", description=test_msg
)
def test_dsaz_report_failure_no_description_msg(self):
@@ -1870,7 +1943,7 @@ scbus-1 on xpt0 bus 0
self.assertTrue(dsrc._report_failure()) # no description msg
self.m_report_failure_to_fabric.assert_called_once_with(
- dhcp_opts=mock.ANY, description=None
+ endpoint="168.63.129.16", description=None
)
def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
@@ -1879,8 +1952,8 @@ scbus-1 on xpt0 bus 0
with mock.patch.object(
dsrc, "crawl_metadata"
) as m_crawl_metadata, mock.patch.object(
- dsrc, "_wireserver_endpoint", return_value="test-ep"
- ) as m_wireserver_endpoint:
+ dsrc, "_wireserver_endpoint", "test-ep"
+ ):
# mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
@@ -1888,7 +1961,7 @@ scbus-1 on xpt0 bus 0
# ensure called with cached ephemeral dhcp lease option 245
self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY, dhcp_opts=m_wireserver_endpoint
+ endpoint="test-ep", description=mock.ANY
)
def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
@@ -1898,7 +1971,7 @@ scbus-1 on xpt0 bus 0
# mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
- test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease_dhcp_option_245 = "01:02:03:04"
test_lease = {
"unknown-245": test_lease_dhcp_option_245,
"interface": "eth0",
@@ -1910,7 +1983,7 @@ scbus-1 on xpt0 bus 0
# ensure called with the newly discovered
# ephemeral dhcp lease option 245
self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245
+ endpoint="1.2.3.4", description=mock.ANY
)
def test_exception_fetching_fabric_data_doesnt_propagate(self):
@@ -2923,7 +2996,9 @@ class TestDeterminePPSTypeScenarios:
azure_ds._determine_pps_type(ovf_cfg, imds_md)
== dsaz.PPSType.UNKNOWN
)
- assert is_file.mock_calls == [mock.call(dsaz.REPROVISION_MARKER_FILE)]
+ assert is_file.mock_calls == [
+ mock.call(dsaz.REPORTED_READY_MARKER_FILE)
+ ]
@mock.patch("os.path.isfile", return_value=False)
@@ -2959,12 +3034,14 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
@mock.patch(MOCKPATH + "util.write_file", autospec=True)
@mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
- @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_hot_attached_nics")
+ @mock.patch(
+ MOCKPATH + "DataSourceAzure._wait_for_hot_attached_primary_nic"
+ )
@mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
def test_detect_nic_attach_reports_ready_and_waits_for_detach(
self,
m_detach,
- m_wait_for_hot_attached_nics,
+ m_wait_for_hot_attached_primary_nic,
m_report_ready,
m_writefile,
):
@@ -2972,7 +3049,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
dsa._wait_for_all_nics_ready()
self.assertEqual(1, m_report_ready.call_count)
- self.assertEqual(1, m_wait_for_hot_attached_nics.call_count)
+ self.assertEqual(1, m_wait_for_hot_attached_primary_nic.call_count)
self.assertEqual(1, m_detach.call_count)
self.assertEqual(1, m_writefile.call_count)
m_writefile.assert_called_with(
@@ -2998,7 +3075,8 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
m_report_ready,
m_writefile,
):
- """Wait for nic attach if we do not have a fallback interface"""
+ """Wait for nic attach if we do not have a fallback interface.
+ Skip waiting for additional nics once the primary nic has been found."""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
lease = {
"interface": "eth9",
@@ -3029,10 +3107,28 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
dsa._wait_for_all_nics_ready()
self.assertEqual(1, m_detach.call_count)
- self.assertEqual(2, m_attach.call_count)
+ # only wait for primary nic
+ self.assertEqual(1, m_attach.call_count)
# DHCP and network metadata calls will only happen on the primary NIC.
self.assertEqual(1, m_dhcpv4.call_count)
self.assertEqual(1, m_imds.call_count)
+ # no call to bring link up on secondary nic
+ self.assertEqual(1, m_link_up.call_count)
+
+ # reset mock to test again with primary nic being eth1
+ m_detach.reset_mock()
+ m_attach.reset_mock()
+ m_dhcpv4.reset_mock()
+ m_link_up.reset_mock()
+ m_attach.side_effect = ["eth0", "eth1"]
+ m_imds.reset_mock()
+ m_imds.side_effect = [{}, md]
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa._wait_for_all_nics_ready()
+ self.assertEqual(1, m_detach.call_count)
+ self.assertEqual(2, m_attach.call_count)
+ self.assertEqual(2, m_dhcpv4.call_count)
+ self.assertEqual(2, m_imds.call_count)
self.assertEqual(2, m_link_up.call_count)
@mock.patch("cloudinit.url_helper.time.sleep", autospec=True)
@@ -3098,12 +3194,10 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
dsa.wait_for_link_up("eth0")
self.assertEqual(1, m_is_link_up.call_count)
- @mock.patch(MOCKPATH + "net.is_up", autospec=True)
- @mock.patch(MOCKPATH + "util.write_file")
- @mock.patch("cloudinit.net.read_sys_net", return_value="device-id")
@mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ @mock.patch(MOCKPATH + "sleep")
def test_wait_for_link_up_checks_link_after_sleep(
- self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up
+ self, m_sleep, m_try_set_link_up
):
"""Waiting for link to be up should return immediately if the link is
already up."""
@@ -3113,50 +3207,10 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
m_try_set_link_up.return_value = False
- callcount = 0
-
- def is_up_mock(key):
- nonlocal callcount
- if callcount == 0:
- callcount += 1
- return False
- return True
-
- m_is_up.side_effect = is_up_mock
-
- with mock.patch("cloudinit.sources.DataSourceAzure.sleep"):
- dsa.wait_for_link_up("eth0")
- self.assertEqual(2, m_try_set_link_up.call_count)
- self.assertEqual(2, m_is_up.call_count)
-
- @mock.patch(MOCKPATH + "util.write_file")
- @mock.patch("cloudinit.net.read_sys_net", return_value="device-id")
- @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
- def test_wait_for_link_up_writes_to_device_file(
- self, m_is_link_up, m_read_sys_net, m_writefile
- ):
- """Waiting for link to be up should return immediately if the link is
- already up."""
-
- distro_cls = distros.fetch("ubuntu")
- distro = distro_cls("ubuntu", {}, self.paths)
- dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
- callcount = 0
-
- def linkup(key):
- nonlocal callcount
- if callcount == 0:
- callcount += 1
- return False
- return True
-
- m_is_link_up.side_effect = linkup
-
dsa.wait_for_link_up("eth0")
- self.assertEqual(2, m_is_link_up.call_count)
- self.assertEqual(1, m_read_sys_net.call_count)
- self.assertEqual(2, m_writefile.call_count)
+
+ self.assertEqual(100, m_try_set_link_up.call_count)
+ self.assertEqual(99 * [mock.call(0.1)], m_sleep.mock_calls)
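
The rewritten test pins down the retry shape of wait_for_link_up: 100 attempts with a 0.1-second sleep between each, hence 99 sleeps. A sketch of that loop under those assumed constants, not the actual DataSourceAzure code:

    from time import sleep

    def wait_for_link_up(try_set_link_up, attempts=100, delay=0.1) -> bool:
        for attempt in range(attempts):
            if try_set_link_up():
                return True
            if attempt < attempts - 1:
                sleep(delay)  # only between attempts: attempts - 1 sleeps
        return False

    assert wait_for_link_up(lambda: True) is True  # succeeds without sleeping
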
@mock.patch(
"cloudinit.sources.helpers.netlink.create_bound_netlink_socket"
@@ -3641,6 +3695,197 @@ class TestRandomSeed(CiTestCase):
self.assertEqual(deserialized["seed"], result)
+class TestEphemeralNetworking:
+ @pytest.mark.parametrize("iface", [None, "fakeEth0"])
+ def test_basic_setup(
+ self,
+ azure_ds,
+ mock_ephemeral_dhcp_v4,
+ mock_sleep,
+ iface,
+ ):
+ lease = {
+ "interface": "fakeEth0",
+ "unknown-245": "10:ff:fe:fd",
+ }
+ mock_ephemeral_dhcp_v4.return_value.obtain_lease.side_effect = [lease]
+
+ azure_ds._setup_ephemeral_networking(iface=iface)
+
+ assert mock_ephemeral_dhcp_v4.mock_calls == [
+ mock.call(iface=iface, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call().obtain_lease(),
+ ]
+ assert mock_sleep.mock_calls == []
+ assert azure_ds._wireserver_endpoint == "16.255.254.253"
+ assert azure_ds._ephemeral_dhcp_ctx.iface == lease["interface"]
+
+ @pytest.mark.parametrize("iface", [None, "fakeEth0"])
+ def test_basic_setup_without_wireserver_opt(
+ self,
+ azure_ds,
+ mock_ephemeral_dhcp_v4,
+ mock_sleep,
+ iface,
+ ):
+ lease = {
+ "interface": "fakeEth0",
+ }
+ mock_ephemeral_dhcp_v4.return_value.obtain_lease.side_effect = [lease]
+
+ azure_ds._setup_ephemeral_networking(iface=iface)
+
+ assert mock_ephemeral_dhcp_v4.mock_calls == [
+ mock.call(iface=iface, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call().obtain_lease(),
+ ]
+ assert mock_sleep.mock_calls == []
+ assert azure_ds._wireserver_endpoint == "168.63.129.16"
+ assert azure_ds._ephemeral_dhcp_ctx.iface == lease["interface"]
+
+ def test_no_retry_missing_dhclient_error(
+ self,
+ azure_ds,
+ mock_ephemeral_dhcp_v4,
+ mock_sleep,
+ ):
+ mock_ephemeral_dhcp_v4.return_value.obtain_lease.side_effect = [
+ dhcp.NoDHCPLeaseMissingDhclientError
+ ]
+
+ with pytest.raises(dhcp.NoDHCPLeaseMissingDhclientError):
+ azure_ds._setup_ephemeral_networking()
+
+ assert azure_ds._ephemeral_dhcp_ctx is None
+
+ def test_retry_interface_error(
+ self,
+ azure_ds,
+ mock_ephemeral_dhcp_v4,
+ mock_sleep,
+ ):
+ lease = {
+ "interface": "fakeEth0",
+ }
+ mock_ephemeral_dhcp_v4.return_value.obtain_lease.side_effect = [
+ dhcp.NoDHCPLeaseInterfaceError,
+ lease,
+ ]
+
+ azure_ds._setup_ephemeral_networking()
+
+ assert mock_ephemeral_dhcp_v4.mock_calls == [
+ mock.call(iface=None, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call().obtain_lease(),
+ mock.call().obtain_lease(),
+ ]
+ assert mock_sleep.mock_calls == [mock.call(1)]
+ assert azure_ds._wireserver_endpoint == "168.63.129.16"
+ assert azure_ds._ephemeral_dhcp_ctx.iface == "fakeEth0"
+
+ def test_retry_process_error(
+ self,
+ azure_ds,
+ mock_ephemeral_dhcp_v4,
+ mock_report_diagnostic_event,
+ mock_sleep,
+ ):
+ lease = {
+ "interface": "fakeEth0",
+ }
+ mock_ephemeral_dhcp_v4.return_value.obtain_lease.side_effect = [
+ subp.ProcessExecutionError(
+ cmd=["failed", "cmd"],
+ stdout="test_stdout",
+ stderr="test_stderr",
+ exit_code=4,
+ ),
+ lease,
+ ]
+
+ azure_ds._setup_ephemeral_networking()
+
+ assert mock_ephemeral_dhcp_v4.mock_calls == [
+ mock.call(iface=None, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call().obtain_lease(),
+ mock.call().obtain_lease(),
+ ]
+ assert mock_sleep.mock_calls == [mock.call(1)]
+ assert mock_report_diagnostic_event.mock_calls == [
+ mock.call(
+ "Command failed: cmd=['failed', 'cmd'] "
+ "stderr='test_stderr' stdout='test_stdout' exit_code=4",
+ logger_func=dsaz.LOG.error,
+ )
+ ]
+
+ @pytest.mark.parametrize(
+ "error_class", [dhcp.NoDHCPLeaseInterfaceError, dhcp.NoDHCPLeaseError]
+ )
+ def test_retry_sleeps(
+ self,
+ azure_ds,
+ mock_ephemeral_dhcp_v4,
+ mock_sleep,
+ error_class,
+ ):
+ lease = {
+ "interface": "fakeEth0",
+ }
+ mock_ephemeral_dhcp_v4.return_value.obtain_lease.side_effect = [
+ error_class()
+ ] * 10 + [lease]
+
+ azure_ds._setup_ephemeral_networking()
+
+ assert (
+ mock_ephemeral_dhcp_v4.mock_calls
+ == [
+ mock.call(iface=None, dhcp_log_func=dsaz.dhcp_log_cb),
+ ]
+ + [mock.call().obtain_lease()] * 11
+ )
+ assert mock_sleep.mock_calls == [mock.call(1)] * 10
+ assert azure_ds._wireserver_endpoint == "168.63.129.16"
+ assert azure_ds._ephemeral_dhcp_ctx.iface == "fakeEth0"
+
+ @pytest.mark.parametrize(
+ "error_class", [dhcp.NoDHCPLeaseInterfaceError, dhcp.NoDHCPLeaseError]
+ )
+ def test_retry_times_out(
+ self,
+ azure_ds,
+ mock_ephemeral_dhcp_v4,
+ mock_sleep,
+ mock_time,
+ error_class,
+ ):
+ mock_time.side_effect = [
+ 0.0, # start
+ 60.1, # first
+ 120.1, # third
+ 180.1, # timeout
+ ]
+ mock_ephemeral_dhcp_v4.return_value.obtain_lease.side_effect = [
+ error_class()
+ ] * 10 + [
+ {
+ "interface": "fakeEth0",
+ }
+ ]
+
+ with pytest.raises(dhcp.NoDHCPLeaseError):
+ azure_ds._setup_ephemeral_networking(timeout_minutes=3)
+
+ assert (
+ mock_ephemeral_dhcp_v4.return_value.mock_calls
+ == [mock.call.obtain_lease()] * 3
+ )
+ assert mock_sleep.mock_calls == [mock.call(1)] * 2
+ assert azure_ds._wireserver_endpoint == "168.63.129.16"
+ assert azure_ds._ephemeral_dhcp_ctx is None
+
+
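
Taken together, these tests pin down the retry contract of _setup_ephemeral_networking: a missing DHCP client is fatal, interface and lease errors are retried once per second, and a wall-clock deadline bounds the loop. A sketch under those constraints (the real method also retries subp.ProcessExecutionError and reports a diagnostic event for it):

    from time import sleep, time

    from cloudinit.net import dhcp

    def setup_ephemeral_networking(obtain_lease, timeout_minutes=20):
        deadline = time() + timeout_minutes * 60
        while True:
            try:
                return obtain_lease()
            except dhcp.NoDHCPLeaseMissingDhclientError:
                raise  # no point retrying without a DHCP client
            except (dhcp.NoDHCPLeaseInterfaceError, dhcp.NoDHCPLeaseError):
                if time() > deadline:
                    raise
                sleep(1)
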
def fake_http_error_for_code(status_code: int):
response_failure = requests.Response()
response_failure.status_code = status_code
@@ -3843,14 +4088,14 @@ class TestProvisioning:
mock_get_interfaces,
mock_get_interface_mac,
mock_netlink,
- mock_os_path_isfile,
mock_readurl,
mock_subp_subp,
mock_util_ensure_dir,
mock_util_find_devs_with,
mock_util_load_file,
mock_util_mount_cb,
- mock_util_write_file,
+ mock_wrapping_setup_ephemeral_networking,
+ patched_reported_ready_marker_path,
):
self.azure_ds = azure_ds
self.mock_azure_get_metadata_from_fabric = (
@@ -3869,14 +4114,18 @@ class TestProvisioning:
self.mock_get_interfaces = mock_get_interfaces
self.mock_get_interface_mac = mock_get_interface_mac
self.mock_netlink = mock_netlink
- self.mock_os_path_isfile = mock_os_path_isfile
self.mock_readurl = mock_readurl
self.mock_subp_subp = mock_subp_subp
self.mock_util_ensure_dir = mock_util_ensure_dir
self.mock_util_find_devs_with = mock_util_find_devs_with
self.mock_util_load_file = mock_util_load_file
self.mock_util_mount_cb = mock_util_mount_cb
- self.mock_util_write_file = mock_util_write_file
+ self.mock_wrapping_setup_ephemeral_networking = (
+ mock_wrapping_setup_ephemeral_networking
+ )
+ self.patched_reported_ready_marker_path = (
+ patched_reported_ready_marker_path
+ )
self.imds_md = {
"extended": {"compute": {"ppsType": "None"}},
@@ -3906,20 +4155,9 @@ class TestProvisioning:
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.mock_os_path_isfile.side_effect = [False, False, False]
self.azure_ds._get_data()
- assert self.mock_os_path_isfile.mock_calls == [
- mock.call("/var/lib/cloud/data/poll_imds"),
- mock.call(
- os.path.join(
- self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
- )
- ),
- mock.call("/var/lib/cloud/data/poll_imds"),
- ]
-
assert self.mock_readurl.mock_calls == [
mock.call(
"http://169.254.169.254/metadata/instance?"
@@ -3933,10 +4171,13 @@ class TestProvisioning:
]
# Verify DHCP is set up once.
+ assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [
+ mock.call(timeout_minutes=20)
+ ]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
mock.call(None, dsaz.dhcp_log_cb)
]
- assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
assert self.azure_ds._is_ephemeral_networking_up() is False
# Verify DMI usage.
@@ -3951,8 +4192,7 @@ class TestProvisioning:
# Verify reporting ready once.
assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
mock.call(
- fallback_lease_file=None,
- dhcp_opts="aa:bb:cc:dd",
+ endpoint="10.11.12.13",
iso_dev="/dev/sr0",
pubkey_info=None,
)
@@ -3975,21 +4215,9 @@ class TestProvisioning:
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.mock_os_path_isfile.side_effect = [False, False, False, False]
self.azure_ds._get_data()
- assert self.mock_os_path_isfile.mock_calls == [
- mock.call("/var/lib/cloud/data/poll_imds"),
- mock.call(
- os.path.join(
- self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
- )
- ),
- mock.call("/var/lib/cloud/data/poll_imds"),
- mock.call("/var/lib/cloud/data/reported_ready"),
- ]
-
assert self.mock_readurl.mock_calls == [
mock.call(
"http://169.254.169.254/metadata/instance?"
@@ -4021,11 +4249,15 @@ class TestProvisioning:
]
# Verify DHCP is set up twice.
+ assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [
+ mock.call(timeout_minutes=20),
+ mock.call(timeout_minutes=5),
+ ]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
mock.call(None, dsaz.dhcp_log_cb),
mock.call(None, dsaz.dhcp_log_cb),
]
- assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
assert self.azure_ds._is_ephemeral_networking_up() is False
# Verify DMI usage.
@@ -4040,14 +4272,12 @@ class TestProvisioning:
# Verify reporting ready twice.
assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
mock.call(
- fallback_lease_file=None,
- dhcp_opts="aa:bb:cc:dd",
+ endpoint="10.11.12.13",
iso_dev="/dev/sr0",
pubkey_info=None,
),
mock.call(
- fallback_lease_file=None,
- dhcp_opts="aa:bb:cc:dd",
+ endpoint="10.11.12.13",
iso_dev=None,
pubkey_info=None,
),
@@ -4082,26 +4312,9 @@ class TestProvisioning:
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.mock_os_path_isfile.side_effect = [
- False, # /var/lib/cloud/data/poll_imds
- False, # seed/azure/ovf-env.xml
- False, # /var/lib/cloud/data/poll_imds
- True, # /var/lib/cloud/data/reported_ready
- ]
self.azure_ds._get_data()
- assert self.mock_os_path_isfile.mock_calls == [
- mock.call("/var/lib/cloud/data/poll_imds"),
- mock.call(
- os.path.join(
- self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
- )
- ),
- mock.call("/var/lib/cloud/data/poll_imds"),
- mock.call("/var/lib/cloud/data/reported_ready"),
- ]
-
assert self.mock_readurl.mock_calls == [
mock.call(
"http://169.254.169.254/metadata/instance?"
@@ -4142,11 +4355,15 @@ class TestProvisioning:
]
# Verify DHCP is set up twice.
+ assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [
+ mock.call(timeout_minutes=20),
+ mock.call(iface="ethAttached1", timeout_minutes=20),
+ ]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
mock.call(None, dsaz.dhcp_log_cb),
mock.call("ethAttached1", dsaz.dhcp_log_cb),
]
- assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
assert self.azure_ds._is_ephemeral_networking_up() is False
# Verify DMI usage.
@@ -4161,14 +4378,12 @@ class TestProvisioning:
# Verify reporting ready twice.
assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
mock.call(
- fallback_lease_file=None,
- dhcp_opts="aa:bb:cc:dd",
+ endpoint="10.11.12.13",
iso_dev="/dev/sr0",
pubkey_info=None,
),
mock.call(
- fallback_lease_file=None,
- dhcp_opts="aa:bb:cc:dd",
+ endpoint="10.11.12.13",
iso_dev=None,
pubkey_info=None,
),
@@ -4183,6 +4398,76 @@ class TestProvisioning:
mock.call.create_bound_netlink_socket().close(),
]
+ @pytest.mark.parametrize("pps_type", ["Savable", "Running", "None"])
+ def test_recovery_pps(self, pps_type):
+ self.patched_reported_ready_marker_path.write_text("")
+ self.imds_md["extended"]["compute"]["ppsType"] = pps_type
+ ovf_data = {"HostName": "myhost", "UserName": "myuser"}
+
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(
+ contents=construct_valid_ovf_env(data=ovf_data).encode()
+ ),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+
+ self.azure_ds._get_data()
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=10,
+ exception_cb=dsaz.imds_readurl_exception_callback,
+ infinite=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/reprovisiondata?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ exception_cb=mock.ANY,
+ infinite=True,
+ log_req_resp=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=10,
+ exception_cb=dsaz.imds_readurl_exception_callback,
+ infinite=False,
+ ),
+ ]
+
+ # Verify DHCP is set up once.
+ assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [
+ mock.call(timeout_minutes=20),
+ ]
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb),
+ ]
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready once.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ endpoint="10.11.12.13",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ ),
+ ]
+
+ # Verify no netlink operations for recovering PPS.
+ assert self.mock_netlink.mock_calls == []
+
class TestValidateIMDSMetadata:
@pytest.mark.parametrize(
diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
index 98143bc3..4279dc4f 100644
--- a/tests/unittests/sources/test_azure_helper.py
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -1,6 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import copy
import os
import re
import unittest
@@ -8,10 +7,12 @@ from textwrap import dedent
from xml.etree import ElementTree
from xml.sax.saxutils import escape, unescape
+import pytest
+
from cloudinit.sources.helpers import azure as azure_helper
from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
from cloudinit.util import load_file
-from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+from tests.unittests.helpers import CiTestCase, ExitStack, mock
GOAL_STATE_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
@@ -87,99 +88,20 @@ class SentinelException(Exception):
pass
-class TestFindEndpoint(CiTestCase):
- def setUp(self):
- super(TestFindEndpoint, self).setUp()
- patches = ExitStack()
- self.addCleanup(patches.close)
-
- self.load_file = patches.enter_context(
- mock.patch.object(azure_helper.util, "load_file")
- )
-
- self.dhcp_options = patches.enter_context(
- mock.patch.object(wa_shim, "_load_dhclient_json")
- )
-
- self.networkd_leases = patches.enter_context(
- mock.patch.object(wa_shim, "_networkd_get_value_from_leases")
- )
- self.networkd_leases.return_value = None
-
- def test_missing_file(self):
- """wa_shim find_endpoint uses default endpoint if
- leasefile not found
- """
- self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
-
- def test_missing_special_azure_line(self):
- """wa_shim find_endpoint uses default endpoint if leasefile is found
- but does not contain DHCP Option 245 (whose value is the endpoint)
- """
- self.load_file.return_value = ""
- self.dhcp_options.return_value = {"eth0": {"key": "value"}}
- self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
-
- @staticmethod
- def _build_lease_content(encoded_address):
- endpoint = azure_helper._get_dhcp_endpoint_option_name()
- return "\n".join(
- [
- "lease {",
- ' interface "eth0";',
- " option {0} {1};".format(endpoint, encoded_address),
- "}",
- ]
- )
-
- def test_from_dhcp_client(self):
- self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
- self.assertEqual("5.4.3.2", wa_shim.find_endpoint(None))
-
- def test_latest_lease_used(self):
- encoded_addresses = ["5:4:3:2", "4:3:2:1"]
- file_content = "\n".join(
- [
- self._build_lease_content(encoded_address)
- for encoded_address in encoded_addresses
- ]
- )
- self.load_file.return_value = file_content
- self.assertEqual(
- encoded_addresses[-1].replace(":", "."),
- wa_shim.find_endpoint("foobar"),
- )
-
-
-class TestExtractIpAddressFromLeaseValue(CiTestCase):
- def test_hex_string(self):
- ip_address, encoded_address = "98.76.54.32", "62:4c:36:20"
- self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
- )
-
- def test_hex_string_with_single_character_part(self):
- ip_address, encoded_address = "4.3.2.1", "4:3:2:1"
- self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
- )
-
- def test_packed_string(self):
- ip_address, encoded_address = "98.76.54.32", "bL6 "
- self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
- )
-
- def test_packed_string_with_escaped_quote(self):
- ip_address, encoded_address = "100.72.34.108", 'dH\\"l'
- self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
- )
-
- def test_packed_string_containing_a_colon(self):
- ip_address, encoded_address = "100.72.58.108", "dH:l"
- self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+class TestGetIpFromLeaseValue:
+ @pytest.mark.parametrize(
+ "encoded_address,ip_address",
+ [
+ ("62:4c:36:20", "98.76.54.32"),
+ ("4:3:2:1", "4.3.2.1"),
+ ("bL6 ", "98.76.54.32"),
+ ('dH\\"l', "100.72.34.108"),
+ ("dH:l", "100.72.58.108"),
+ ],
+ )
+ def test_get_ip_from_lease_value(self, encoded_address, ip_address):
+ assert (
+ azure_helper.get_ip_from_lease_value(encoded_address) == ip_address
)
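
The five parametrized cases fully determine the two lease-value encodings. A hypothetical re-implementation that satisfies them (the real helper may differ in detail):

    def get_ip_from_lease_value(value: str) -> str:
        # After stripping escape backslashes, anything longer than four
        # characters is colon-separated hex pairs; otherwise it is four
        # packed ASCII bytes (where ":" is just byte 0x3a).
        unescaped = value.replace("\\", "")
        if len(unescaped) > 4:
            octets = [int(part, 16) for part in unescaped.split(":")]
        else:
            octets = [ord(ch) for ch in unescaped]
        return ".".join(str(octet) for octet in octets)

    assert get_ip_from_lease_value("62:4c:36:20") == "98.76.54.32"
    assert get_ip_from_lease_value("bL6 ") == "98.76.54.32"
    assert get_ip_from_lease_value("dH:l") == "100.72.58.108"
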
@@ -435,7 +357,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
def test_http_with_retries(self):
self.m_readurl.return_value = "TestResp"
self.assertEqual(
- azure_helper.http_with_retries("testurl"),
+ azure_helper.http_with_retries("testurl", headers={}),
self.m_readurl.return_value,
)
self.assertEqual(self.m_readurl.call_count, 1)
@@ -444,7 +366,10 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
self.m_readurl.side_effect = SentinelException
self.assertRaises(
- SentinelException, azure_helper.http_with_retries, "testurl"
+ SentinelException,
+ azure_helper.http_with_retries,
+ "testurl",
+ headers={},
)
self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts)
@@ -471,7 +396,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
] * self.periodic_logging_attempts + ["TestResp"]
self.m_readurl.return_value = "TestResp"
- response = azure_helper.http_with_retries("testurl")
+ response = azure_helper.http_with_retries("testurl", headers={})
self.assertEqual(response, self.m_readurl.return_value)
self.assertEqual(
self.m_readurl.call_count, self.periodic_logging_attempts + 1
@@ -489,7 +414,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
] * self.periodic_logging_attempts + ["TestResp"]
self.m_readurl.return_value = "TestResp"
- azure_helper.http_with_retries("testurl")
+ azure_helper.http_with_retries("testurl", headers={})
self.assertEqual(
self.m_readurl.call_count, self.periodic_logging_attempts + 1
@@ -517,7 +442,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
) + ["TestResp"]
self.m_readurl.return_value = "TestResp"
- azure_helper.http_with_retries("testurl")
+ azure_helper.http_with_retries("testurl", headers={})
self.assertEqual(
self.m_readurl.call_count, self.periodic_logging_attempts
)
@@ -542,49 +467,9 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
kwargs = {
"headers": mock.MagicMock(),
"data": mock.MagicMock(),
- # timeout kwarg should not be modified or deleted if present
- "timeout": mock.MagicMock(),
}
azure_helper.http_with_retries(testurl, **kwargs)
- self.m_readurl.assert_called_once_with(testurl, **kwargs)
-
- def test_http_with_retries_adds_timeout_kwarg_if_not_present(self):
- testurl = mock.MagicMock()
- kwargs = {"headers": mock.MagicMock(), "data": mock.MagicMock()}
- expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs["timeout"] = self.default_readurl_timeout
-
- azure_helper.http_with_retries(testurl, **kwargs)
- self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
-
- def test_http_with_retries_deletes_retries_kwargs_passed_in(self):
- """http_with_retries already implements retry logic,
- so url_helper.readurl should not have retries.
- http_with_retries should delete kwargs that
- cause url_helper.readurl to retry.
- """
- testurl = mock.MagicMock()
- kwargs = {
- "headers": mock.MagicMock(),
- "data": mock.MagicMock(),
- "timeout": mock.MagicMock(),
- "retries": mock.MagicMock(),
- "infinite": mock.MagicMock(),
- }
- expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs.pop("retries", None)
- expected_kwargs.pop("infinite", None)
-
- azure_helper.http_with_retries(testurl, **kwargs)
- self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
- self.assertIn(
- "retries kwarg passed in for communication with Azure endpoint.",
- self.logs.getvalue(),
- )
- self.assertIn(
- "infinite kwarg passed in for communication with Azure endpoint.",
- self.logs.getvalue(),
- )
+ self.m_readurl.assert_called_once_with(testurl, **kwargs, timeout=5)
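
After this change, http_with_retries requires headers as a keyword, owns its retry policy outright (callers may no longer pass timeout, retries, or infinite), and always forwards timeout=5 to readurl. A sketch of that contract; the attempt count and delay below are placeholders, not the real values:

    from time import sleep

    from cloudinit.url_helper import readurl

    def http_with_retries(url, *, headers, data=None, attempts=240, delay=2):
        last_exc = None
        for _ in range(attempts):
            try:
                return readurl(url, headers=headers, data=data, timeout=5)
            except Exception as exc:  # retry any readurl failure
                last_exc = exc
                sleep(delay)
        raise last_exc
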
class TestOpenSSLManager(CiTestCase):
@@ -1101,9 +986,6 @@ class TestWALinuxAgentShim(CiTestCase):
self.AzureEndpointHttpClient = patches.enter_context(
mock.patch.object(azure_helper, "AzureEndpointHttpClient")
)
- self.find_endpoint = patches.enter_context(
- mock.patch.object(wa_shim, "find_endpoint")
- )
self.GoalState = patches.enter_context(
mock.patch.object(azure_helper, "GoalState")
)
@@ -1122,7 +1004,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.GoalState.return_value.instance_id = self.test_instance_id
def test_eject_iso_is_called(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
with mock.patch.object(
shim, "eject_iso", autospec=True
) as m_eject_iso:
@@ -1130,22 +1012,21 @@ class TestWALinuxAgentShim(CiTestCase):
m_eject_iso.assert_called_once_with("/dev/sr0")
def test_http_client_does_not_use_certificate_for_report_ready(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
self.assertEqual(
[mock.call(None)], self.AzureEndpointHttpClient.call_args_list
)
def test_http_client_does_not_use_certificate_for_report_failure(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_report_failure(description="TestDesc")
self.assertEqual(
[mock.call(None)], self.AzureEndpointHttpClient.call_args_list
)
def test_correct_url_used_for_goalstate_during_report_ready(self):
- self.find_endpoint.return_value = "test_endpoint"
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
@@ -1164,8 +1045,7 @@ class TestWALinuxAgentShim(CiTestCase):
)
def test_correct_url_used_for_goalstate_during_report_failure(self):
- self.find_endpoint.return_value = "test_endpoint"
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_report_failure(description="TestDesc")
m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
@@ -1187,7 +1067,7 @@ class TestWALinuxAgentShim(CiTestCase):
# if register_with_azure_and_fetch_data() isn't passed some info about
# the user's public keys, there's no point in even trying to parse the
# certificates
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
mypk = [
{"fingerprint": "fp1", "path": "path1"},
{"fingerprint": "fp3", "path": "path3", "value": ""},
@@ -1211,13 +1091,12 @@ class TestWALinuxAgentShim(CiTestCase):
def test_absent_certificates_produces_empty_public_keys(self):
mypk = [{"fingerprint": "fp1", "path": "path1"}]
self.GoalState.return_value.certificates_xml = None
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
self.assertEqual([], data)
def test_correct_url_used_for_report_ready(self):
- self.find_endpoint.return_value = "test_endpoint"
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
expected_url = "http://test_endpoint/machine?comp=health"
self.assertEqual(
@@ -1226,8 +1105,7 @@ class TestWALinuxAgentShim(CiTestCase):
)
def test_correct_url_used_for_report_failure(self):
- self.find_endpoint.return_value = "test_endpoint"
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_report_failure(description="TestDesc")
expected_url = "http://test_endpoint/machine?comp=health"
self.assertEqual(
@@ -1236,7 +1114,7 @@ class TestWALinuxAgentShim(CiTestCase):
)
def test_goal_state_values_used_for_report_ready(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
posted_document = (
self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
@@ -1246,7 +1124,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.assertIn(self.test_instance_id, posted_document)
def test_goal_state_values_used_for_report_failure(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_report_failure(description="TestDesc")
posted_document = (
self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
@@ -1256,7 +1134,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.assertIn(self.test_instance_id, posted_document)
def test_xml_elems_in_report_ready_post(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
health_document = HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(self.test_incarnation),
@@ -1271,7 +1149,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.assertEqual(health_document, posted_document)
def test_xml_elems_in_report_failure_post(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_report_failure(description="TestDesc")
health_document = HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(self.test_incarnation),
@@ -1294,7 +1172,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_register_with_azure_and_fetch_data_calls_send_ready_signal(
self, m_goal_state_health_reporter
):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
self.assertEqual(
1,
@@ -1305,7 +1183,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_register_with_azure_and_report_failure_calls_send_failure_signal(
self, m_goal_state_health_reporter
):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_report_failure(description="TestDesc")
m_goal_state_health_reporter.return_value.send_failure_signal.assert_called_once_with( # noqa: E501
description="TestDesc"
@@ -1314,7 +1192,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_register_with_azure_and_report_failure_does_not_need_certificates(
self,
):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
with mock.patch.object(
shim, "_fetch_goal_state_from_azure", autospec=True
) as m_fetch_goal_state_from_azure:
@@ -1324,24 +1202,24 @@ class TestWALinuxAgentShim(CiTestCase):
)
def test_clean_up_can_be_called_at_any_time(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.clean_up()
def test_openssl_manager_not_instantiated_by_shim_report_status(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
shim.register_with_azure_and_report_failure(description="TestDesc")
shim.clean_up()
self.OpenSSLManager.assert_not_called()
def test_clean_up_after_report_ready(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
shim.clean_up()
self.OpenSSLManager.return_value.clean_up.assert_not_called()
def test_clean_up_after_report_failure(self):
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_report_failure(description="TestDesc")
shim.clean_up()
self.OpenSSLManager.return_value.clean_up.assert_not_called()
@@ -1350,7 +1228,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.AzureEndpointHttpClient.return_value.get.side_effect = (
SentinelException
)
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
SentinelException, shim.register_with_azure_and_fetch_data
)
@@ -1359,7 +1237,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.AzureEndpointHttpClient.return_value.get.side_effect = (
SentinelException
)
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
SentinelException,
shim.register_with_azure_and_report_failure,
@@ -1368,7 +1246,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self):
self.GoalState.side_effect = SentinelException
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
SentinelException, shim.register_with_azure_and_fetch_data
)
@@ -1377,7 +1255,7 @@ class TestWALinuxAgentShim(CiTestCase):
self,
):
self.GoalState.side_effect = SentinelException
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
SentinelException,
shim.register_with_azure_and_report_failure,
@@ -1388,7 +1266,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.AzureEndpointHttpClient.return_value.post.side_effect = (
SentinelException
)
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
SentinelException, shim.register_with_azure_and_fetch_data
)
@@ -1397,7 +1275,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.AzureEndpointHttpClient.return_value.post.side_effect = (
SentinelException
)
- shim = wa_shim()
+ shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
SentinelException,
shim.register_with_azure_and_report_failure,
@@ -1416,14 +1294,14 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
)
def test_data_from_shim_returned(self):
- ret = azure_helper.get_metadata_from_fabric()
+ ret = azure_helper.get_metadata_from_fabric(endpoint="test_endpoint")
self.assertEqual(
self.m_shim.return_value.register_with_azure_and_fetch_data.return_value, # noqa: E501
ret,
)
def test_success_calls_clean_up(self):
- azure_helper.get_metadata_from_fabric()
+ azure_helper.get_metadata_from_fabric(endpoint="test_endpoint")
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_failure_in_registration_propagates_exc_and_calls_clean_up(self):
@@ -1431,14 +1309,18 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
SentinelException
)
self.assertRaises(
- SentinelException, azure_helper.get_metadata_from_fabric
+ SentinelException,
+ azure_helper.get_metadata_from_fabric,
+ "test_endpoint",
)
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_calls_shim_register_with_azure_and_fetch_data(self):
m_pubkey_info = mock.MagicMock()
azure_helper.get_metadata_from_fabric(
- pubkey_info=m_pubkey_info, iso_dev="/dev/sr0"
+ endpoint="test_endpoint",
+ pubkey_info=m_pubkey_info,
+ iso_dev="/dev/sr0",
)
self.assertEqual(
1,
@@ -1450,17 +1332,10 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
)
def test_instantiates_shim_with_kwargs(self):
- m_fallback_lease_file = mock.MagicMock()
- m_dhcp_options = mock.MagicMock()
- azure_helper.get_metadata_from_fabric(
- fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
- )
+ azure_helper.get_metadata_from_fabric(endpoint="test_endpoint")
self.assertEqual(1, self.m_shim.call_count)
self.assertEqual(
- mock.call(
- fallback_lease_file=m_fallback_lease_file,
- dhcp_options=m_dhcp_options,
- ),
+ mock.call(endpoint="test_endpoint"),
self.m_shim.call_args,
)
@@ -1478,7 +1353,7 @@ class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
)
def test_success_calls_clean_up(self):
- azure_helper.report_failure_to_fabric()
+ azure_helper.report_failure_to_fabric(endpoint="test_endpoint")
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up(
@@ -1488,14 +1363,18 @@ class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
SentinelException
)
self.assertRaises(
- SentinelException, azure_helper.report_failure_to_fabric
+ SentinelException,
+ azure_helper.report_failure_to_fabric,
+ "test_endpoint",
)
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_report_failure_to_fabric_with_desc_calls_shim_report_failure(
self,
):
- azure_helper.report_failure_to_fabric(description="TestDesc")
+ azure_helper.report_failure_to_fabric(
+ endpoint="test_endpoint", description="TestDesc"
+ )
self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
description="TestDesc"
)
@@ -1503,7 +1382,7 @@ class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure(
self,
):
- azure_helper.report_failure_to_fabric()
+ azure_helper.report_failure_to_fabric(endpoint="test_endpoint")
# default err message description should be shown to the user
# if no description is passed in
self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
@@ -1515,7 +1394,9 @@ class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure(
self,
):
- azure_helper.report_failure_to_fabric(description="")
+ azure_helper.report_failure_to_fabric(
+ endpoint="test_endpoint", description=""
+ )
# default err message description should be shown to the user
# if an empty description is passed in
self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
@@ -1525,84 +1406,11 @@ class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
)
def test_instantiates_shim_with_kwargs(self):
- m_fallback_lease_file = mock.MagicMock()
- m_dhcp_options = mock.MagicMock()
azure_helper.report_failure_to_fabric(
- fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ endpoint="test_endpoint",
)
self.m_shim.assert_called_once_with(
- fallback_lease_file=m_fallback_lease_file,
- dhcp_options=m_dhcp_options,
- )
-
-
-class TestExtractIpAddressFromNetworkd(CiTestCase):
-
- azure_lease = dedent(
- """\
- # This is private data. Do not parse.
- ADDRESS=10.132.0.5
- NETMASK=255.255.255.255
- ROUTER=10.132.0.1
- SERVER_ADDRESS=169.254.169.254
- NEXT_SERVER=10.132.0.1
- MTU=1460
- T1=43200
- T2=75600
- LIFETIME=86400
- DNS=169.254.169.254
- NTP=169.254.169.254
- DOMAINNAME=c.ubuntu-foundations.internal
- DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
- HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
- ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
- CLIENTID=ff405663a200020000ab11332859494d7a8b4c
- OPTION_245=624c3620
- """
- )
-
- def setUp(self):
- super(TestExtractIpAddressFromNetworkd, self).setUp()
- self.lease_d = self.tmp_dir()
-
- def test_no_valid_leases_is_none(self):
- """No valid leases should return None."""
- self.assertIsNone(
- wa_shim._networkd_get_value_from_leases(self.lease_d)
- )
-
- def test_option_245_is_found_in_single(self):
- """A single valid lease with 245 option should return it."""
- populate_dir(self.lease_d, {"9": self.azure_lease})
- self.assertEqual(
- "624c3620", wa_shim._networkd_get_value_from_leases(self.lease_d)
- )
-
- def test_option_245_not_found_returns_None(self):
- """A valid lease, but no option 245 should return None."""
- populate_dir(
- self.lease_d,
- {"9": self.azure_lease.replace("OPTION_245", "OPTION_999")},
- )
- self.assertIsNone(
- wa_shim._networkd_get_value_from_leases(self.lease_d)
- )
-
- def test_multiple_returns_first(self):
- """Somewhat arbitrarily return the first address when multiple.
-
- Most important at the moment is that this is consistent behavior
- rather than changing randomly as in order of a dictionary."""
- myval = "624c3601"
- populate_dir(
- self.lease_d,
- {
- "9": self.azure_lease,
- "2": self.azure_lease.replace("624c3620", myval),
- },
- )
- self.assertEqual(
- myval, wa_shim._networkd_get_value_from_leases(self.lease_d)
+ endpoint="test_endpoint",
)
diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py
index a2f26245..8cd58c96 100644
--- a/tests/unittests/sources/test_cloudsigma.py
+++ b/tests/unittests/sources/test_cloudsigma.py
@@ -3,8 +3,8 @@
import copy
from cloudinit import distros, helpers, sources
-from cloudinit.cs_utils import Cepko
from cloudinit.sources import DataSourceCloudSigma
+from cloudinit.sources.helpers.cloudsigma import Cepko
from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py
index a5bdb629..a78eaccb 100644
--- a/tests/unittests/sources/test_common.py
+++ b/tests/unittests/sources/test_common.py
@@ -106,7 +106,7 @@ class TestDataSourceInvariants(test_helpers.TestCase):
" {}".format(str(ds), cfg_src)
)
self.assertTrue(
- hasattr(sources.NetworkConfigSource, cfg_src), fail_msg
+ isinstance(cfg_src, sources.NetworkConfigSource), fail_msg
)
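
The switch from hasattr to isinstance suggests NetworkConfigSource now hands out instances, most plausibly enum members, where the old code exposed source names as class attributes. An illustrative shape only; the member names are assumptions:

    import enum

    class NetworkConfigSource(enum.Enum):
        CMD_LINE = "cmdline"
        DS = "ds"
        SYSTEM_CFG = "system_cfg"
        FALLBACK = "fallback"

    cfg_src = NetworkConfigSource.DS
    assert isinstance(cfg_src, NetworkConfigSource)
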
def test_expected_dsname_defined(self):
diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py
index 1fc40a0e..83f4621b 100644
--- a/tests/unittests/sources/test_configdrive.py
+++ b/tests/unittests/sources/test_configdrive.py
@@ -35,6 +35,12 @@ EC2_META = {
"security-groups": ["default"],
}
USER_DATA = b"#!/bin/sh\necho This is user data\n"
+VENDOR_DATA = {
+ "magic": "",
+}
+VENDOR_DATA2 = {
+ "static": "",
+}
OSTACK_META = {
"availability_zone": "nova",
"files": [
@@ -363,10 +369,14 @@ CFG_DRIVE_FILES_V2 = {
"openstack/content/0001": CONTENT_1,
"openstack/latest/meta_data.json": json.dumps(OSTACK_META),
"openstack/latest/user_data": USER_DATA,
+ "openstack/latest/vendor_data.json": json.dumps(VENDOR_DATA),
+ "openstack/latest/vendor_data2.json": json.dumps(VENDOR_DATA2),
"openstack/latest/network_data.json": json.dumps(NETWORK_DATA),
"openstack/2015-10-15/meta_data.json": json.dumps(OSTACK_META),
"openstack/2015-10-15/user_data": USER_DATA,
"openstack/2015-10-15/network_data.json": json.dumps(NETWORK_DATA),
+ "openstack/2015-10-15/vendor_data.json": json.dumps(VENDOR_DATA),
+ "openstack/2015-10-15/vendor_data2.json": json.dumps(VENDOR_DATA2),
}
M_PATH = "cloudinit.sources.DataSourceConfigDrive."
@@ -531,6 +541,8 @@ class TestConfigDriveDataSource(CiTestCase):
self.assertEqual(USER_DATA, found["userdata"])
self.assertEqual(expected_md, found["metadata"])
self.assertEqual(NETWORK_DATA, found["networkdata"])
+ self.assertEqual(VENDOR_DATA, found["vendordata"])
+ self.assertEqual(VENDOR_DATA2, found["vendordata2"])
self.assertEqual(found["files"]["/etc/foo.cfg"], CONTENT_0)
self.assertEqual(found["files"]["/etc/bar/bar.cfg"], CONTENT_1)
@@ -591,11 +603,10 @@ class TestConfigDriveDataSource(CiTestCase):
def my_is_partition(dev):
return dev[-1] in "0123456789" and not dev.startswith("sr")
+ orig_find_devs_with = util.find_devs_with
+ orig_is_partition = util.is_partition
try:
- orig_find_devs_with = util.find_devs_with
util.find_devs_with = my_devs_with
-
- orig_is_partition = util.is_partition
util.is_partition = my_is_partition
devs_with_answers = {
@@ -1058,6 +1069,8 @@ def populate_ds_from_read_config(cfg_ds, source, results):
cfg_ds.metadata = results.get("metadata")
cfg_ds.ec2_metadata = results.get("ec2-metadata")
cfg_ds.userdata_raw = results.get("userdata")
+ cfg_ds.vendordata_raw = results.get("vendordata")
+ cfg_ds.vendordata2_raw = results.get("vendordata2")
cfg_ds.version = results.get("version")
cfg_ds.network_json = results.get("networkdata")
cfg_ds._network_config = openstack.convert_net_json(
diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py
index b376660d..e5648007 100644
--- a/tests/unittests/sources/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -2,10 +2,11 @@
import copy
import json
+import threading
from unittest import mock
-import httpretty
import requests
+import responses
from cloudinit import helpers
from cloudinit.sources import DataSourceEc2 as ec2
@@ -38,7 +39,7 @@ DYNAMIC_METADATA = {
# collected from api version 2016-09-02/ with
# python3 -c 'import json
-# from cloudinit.ec2_utils import get_instance_metadata as gm
+# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm
# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))'
# Note that the MAC addresses have been modified to sort in the opposite order
# to the device-number attribute, to test LP: #1876312
@@ -123,7 +124,7 @@ DEFAULT_METADATA = {
# collected from api version 2018-09-24/ with
# python3 -c 'import json
-# from cloudinit.ec2_utils import get_instance_metadata as gm
+# from cloudinit.sources.helpers.ec2 import get_instance_metadata as gm
# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))'
NIC1_MD_IPV4_IPV6_MULTI_IP = {
@@ -210,6 +211,17 @@ SECONDARY_IP_METADATA_2018_09_24 = {
M_PATH_NET = "cloudinit.sources.DataSourceEc2.net."
+TAGS_METADATA_2021_03_23 = {
+ **DEFAULT_METADATA,
+ "tags": {
+ "instance": {
+ "Environment": "production",
+ "Application": "test",
+ "TagWithoutValue": "",
+ }
+ },
+}
+
def _register_ssh_keys(rfunc, base_url, keys_data):
"""handle ssh key inconsistencies.
@@ -289,9 +301,10 @@ def register_mock_metaserver(base_url, data):
register(base_url, "not found", status=404)
def myreg(*argc, **kwargs):
- url = argc[0]
- method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET
- return httpretty.register_uri(method, *argc, **kwargs)
+ url, body = argc
+ method = responses.PUT if ec2.API_TOKEN_ROUTE in url else responses.GET
+ status = kwargs.get("status", 200)
+ return responses.add(method, url, body, status=status)
register_helper(myreg, base_url, data)
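
myreg() ports the registration helper from httpretty to responses, answering the API-token route with PUT and everything else with GET. A runnable miniature of the same dispatch; the token path here is illustrative, the real one being whatever ec2.API_TOKEN_ROUTE contains:

    import requests
    import responses

    @responses.activate
    def fetch_instance_id() -> str:
        responses.add(
            responses.PUT,
            "http://169.254.169.254/latest/api/token",
            body="fake-token",
            status=200,
        )
        responses.add(
            responses.GET,
            "http://169.254.169.254/latest/meta-data/instance-id",
            body="i-0123456789abcdef0",
            status=200,
        )
        token = requests.put("http://169.254.169.254/latest/api/token").text
        return requests.get(
            "http://169.254.169.254/latest/meta-data/instance-id",
            headers={"X-aws-ec2-metadata-token": token},
        ).text

    assert fetch_instance_id() == "i-0123456789abcdef0"
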
@@ -328,6 +341,15 @@ class TestEc2(test_helpers.HttprettyTestCase):
if sys_cfg is None:
sys_cfg = {}
ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths)
+ event = threading.Event()
+ p = mock.patch("time.sleep", event.wait)
+ p.start()
+
+ def _mock_sleep():
+ event.set()
+ p.stop()
+
+ self.addCleanup(_mock_sleep)
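
The added lines route time.sleep through threading.Event.wait so that test teardown can wake any retry loop still sleeping: the cleanup sets the event, then removes the patch. A standalone miniature of the trick:

    import threading
    import time
    from unittest import mock

    event = threading.Event()
    patcher = mock.patch("time.sleep", event.wait)
    patcher.start()
    try:
        event.set()        # what the test's cleanup does
        start = time.monotonic()
        time.sleep(3600)   # patched to event.wait(3600); returns at once
        assert time.monotonic() - start < 1.0
    finally:
        patcher.stop()
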
if not md_version:
md_version = ds.min_metadata_version
if platform_data is not None:
@@ -371,6 +393,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
register_mock_metaserver(instance_id_url, None)
return ds
+ @responses.activate
def test_network_config_property_returns_version_2_network_data(self):
"""network_config property returns network version 2 for metadata"""
ds = self._setup_ds(
@@ -405,6 +428,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
+ @responses.activate
def test_network_config_property_set_dhcp4(self):
"""network_config property configures dhcp4 on nics with local-ipv4s.
@@ -443,6 +467,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
+ @responses.activate
def test_network_config_property_secondary_private_ips(self):
"""network_config property configures any secondary ipv4 addresses.
@@ -486,6 +511,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
+ @responses.activate
def test_network_config_property_is_cached_in_datasource(self):
"""network_config property is cached in DataSourceEc2."""
ds = self._setup_ds(
@@ -497,6 +523,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertEqual({"cached": "data"}, ds.network_config)
@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @responses.activate
def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
"""Refresh the network_config Ec2 cache if network key is absent.
@@ -511,6 +538,23 @@ class TestEc2(test_helpers.HttprettyTestCase):
md={"md": old_metadata},
)
self.assertTrue(ds.get_data())
+
+ # Workaround https://github.com/getsentry/responses/issues/212
+ if hasattr(responses.mock, "_urls"):
+ # Can be removed when Bionic is EOL
+ for index, url in enumerate(responses.mock._urls):
+ if url["url"].startswith(
+ "http://169.254.169.254/2009-04-04/meta-data/"
+ ):
+ del responses.mock._urls[index]
+ elif hasattr(responses.mock, "_matches"):
+ # Can be removed when Focal and Impish are EOL
+ for index, response in enumerate(responses.mock._matches):
+ if response.url.startswith(
+ "http://169.254.169.254/2009-04-04/meta-data/"
+ ):
+ del responses.mock._matches[index]
+
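+ # A version-agnostic sketch of the same cleanup (hypothetical helper,
+ # not part of this change):
+ #
+ #     def _deregister(prefix):
+ #         registry = getattr(responses.mock, "_matches",
+ #                            getattr(responses.mock, "_urls", []))
+ #         def _url(entry):
+ #             return entry.url if hasattr(entry, "url") else entry["url"]
+ #         registry[:] = [
+ #             e for e in registry if not _url(e).startswith(prefix)
+ #         ]
+ #
+ # Rebuilding the list also avoids the del-while-enumerating above, which
+ # can skip entries when more than one URL matches.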
# Provide new revision of metadata that contains network data
register_mock_metaserver(
"http://169.254.169.254/2009-04-04/meta-data/", DEFAULT_METADATA
@@ -539,6 +583,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
}
self.assertEqual(expected, ds.network_config)
+ @responses.activate
def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
"""get_instance-id gets DataSourceEc2Local.identity if not present.
@@ -558,10 +603,11 @@ class TestEc2(test_helpers.HttprettyTestCase):
] + ds.extended_metadata_versions
for ver in all_versions[:-1]:
register_mock_metaserver(
- "http://169.254.169.254/{0}/meta-data/instance-id".format(ver),
+ "http://[fd00:ec2::254]/{0}/meta-data/instance-id".format(ver),
None,
)
- ds.metadata_address = "http://169.254.169.254"
+
+ ds.metadata_address = "http://[fd00:ec2::254]"
register_mock_metaserver(
"{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]),
DEFAULT_METADATA,
@@ -576,6 +622,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
ds.metadata = DEFAULT_METADATA
self.assertEqual("my-identity-id", ds.get_instance_id())
+ @responses.activate
def test_classic_instance_true(self):
"""If no vpc-id in metadata, is_classic_instance must return true."""
md_copy = copy.deepcopy(DEFAULT_METADATA)
@@ -592,6 +639,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertTrue(ds.get_data())
self.assertTrue(ds.is_classic_instance())
+ @responses.activate
def test_classic_instance_false(self):
"""If vpc-id in metadata, is_classic_instance must return false."""
ds = self._setup_ds(
@@ -602,6 +650,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertTrue(ds.get_data())
self.assertFalse(ds.is_classic_instance())
+ @responses.activate
def test_aws_inaccessible_imds_service_fails_with_retries(self):
"""Inaccessibility of http://169.254.169.254 are retried."""
ds = self._setup_ds(
@@ -618,15 +667,37 @@ class TestEc2(test_helpers.HttprettyTestCase):
mock_success.ok.return_value = True
with mock.patch("cloudinit.url_helper.readurl") as m_readurl:
- m_readurl.side_effect = (conn_error, conn_error, mock_success)
+ # Simulate 18 consecutive connection failures before a success;
+ # wait_for_metadata_service retries until the IMDS responds.
+ m_readurl.side_effect = [conn_error] * 18 + [mock_success]
with mock.patch("cloudinit.url_helper.time.sleep"):
self.assertTrue(ds.wait_for_metadata_service())
# Every retried request targets /latest/api/token
- self.assertEqual(3, len(m_readurl.call_args_list))
+ self.assertEqual(19, len(m_readurl.call_args_list))
for readurl_call in m_readurl.call_args_list:
self.assertIn("latest/api/token", readurl_call[0][0])
+ @responses.activate
def test_aws_token_403_fails_without_retries(self):
"""Verify that 403s fetching AWS tokens are not retried."""
ds = self._setup_ds(
@@ -634,27 +705,21 @@ class TestEc2(test_helpers.HttprettyTestCase):
sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
md=None,
)
+
token_url = self.data_url("latest", data_item="api/token")
- httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403)
+ responses.add(responses.PUT, token_url, status=403)
self.assertFalse(ds.get_data())
# Just one /latest/api/token request
logs = self.logs.getvalue()
- failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0'
expected_logs = [
"WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is"
" disabled. Aborting.",
"WARNING: IMDS's HTTP endpoint is probably disabled",
- failed_put_log,
]
for log in expected_logs:
self.assertIn(log, logs)
- self.assertEqual(
- 1,
- len(
- [line for line in logs.splitlines() if failed_put_log in line]
- ),
- )
+ @responses.activate
def test_aws_token_redacted(self):
"""Verify that aws tokens are redacted when logged."""
ds = self._setup_ds(
@@ -670,9 +735,10 @@ class TestEc2(test_helpers.HttprettyTestCase):
logs_with_redacted = [log for log in all_logs if REDACT_TOK in log]
logs_with_token = [log for log in all_logs if "API-TOKEN" in log]
self.assertEqual(1, len(logs_with_redacted_ttl))
- self.assertEqual(81, len(logs_with_redacted))
+ self.assertEqual(83, len(logs_with_redacted))
self.assertEqual(0, len(logs_with_token))
+ @responses.activate
@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_valid_platform_with_strict_true(self, m_dhcp):
"""Valid platform data should return true with strict_id true."""
@@ -688,6 +754,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertEqual("ec2", ds.platform_type)
self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform)
+ @responses.activate
def test_valid_platform_with_strict_false(self):
"""Valid platform data should return true with strict_id false."""
ds = self._setup_ds(
@@ -698,6 +765,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
+ @responses.activate
def test_unknown_platform_with_strict_true(self):
"""Unknown platform data with strict_id true should return False."""
uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
@@ -709,6 +777,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertFalse(ret)
+ @responses.activate
def test_unknown_platform_with_strict_false(self):
"""Unknown platform data with strict_id false should return True."""
uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
@@ -720,6 +789,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
+ @responses.activate
def test_ec2_local_returns_false_on_non_aws(self):
"""DataSourceEc2Local returns False when platform is not AWS."""
self.datasource = ec2.DataSourceEc2Local
@@ -747,6 +817,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertIn(message, self.logs.getvalue())
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
+ @responses.activate
def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd):
"""DataSourceEc2Local returns False on BSD.
@@ -770,6 +841,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
@mock.patch("cloudinit.net.find_fallback_nic")
@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
+ @responses.activate
def test_ec2_local_performs_dhcp_on_non_bsd(
self, m_is_bsd, m_dhcp, m_fallback_nic, m_net
):
@@ -811,6 +883,20 @@ class TestEc2(test_helpers.HttprettyTestCase):
)
self.assertIn("Crawl of metadata service took", self.logs.getvalue())
+ @responses.activate
+ def test_get_instance_tags(self):
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": TAGS_METADATA_2021_03_23},
+ )
+ self.assertTrue(ds.get_data())
+ self.assertIn("tags", ds.metadata)
+ self.assertIn("instance", ds.metadata["tags"])
+ instance_tags = ds.metadata["tags"]["instance"]
+ self.assertEqual(instance_tags["Application"], "test")
+ self.assertEqual(instance_tags["Environment"], "production")
+
class TestGetSecondaryAddresses(test_helpers.CiTestCase):
diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py
index 1f6b722d..15b25196 100644
--- a/tests/unittests/sources/test_nocloud.py
+++ b/tests/unittests/sources/test_nocloud.py
@@ -7,10 +7,7 @@ import yaml
from cloudinit import dmi, helpers, util
from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud as dsNoCloud
-from cloudinit.sources.DataSourceNoCloud import (
- _maybe_remove_top_network,
- parse_cmdline_data,
-)
+from cloudinit.sources.DataSourceNoCloud import parse_cmdline_data
from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
@@ -253,25 +250,6 @@ class TestNoCloudDataSource(CiTestCase):
self.assertTrue(ret)
self.assertEqual(netconf, dsrc.network_config)
- def test_metadata_network_config_with_toplevel_network(self, m_is_lxd):
- """network-config may have 'network' top level key."""
- netconf = {"config": "disabled"}
- populate_dir(
- os.path.join(self.paths.seed_dir, "nocloud"),
- {
- "user-data": b"ud",
- "meta-data": "instance-id: IID\n",
- "network-config": yaml.dump({"network": netconf}) + "\n",
- },
- )
-
- sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
-
- dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(netconf, dsrc.network_config)
-
def test_metadata_network_config_over_interfaces(self, m_is_lxd):
# network-config should override meta-data/network-interfaces
gateway = "103.225.10.1"
@@ -406,48 +384,3 @@ class TestParseCommandLineData(CiTestCase):
ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline)
self.assertEqual(fill, {})
self.assertFalse(ret)
-
-
-class TestMaybeRemoveToplevelNetwork(CiTestCase):
- """test _maybe_remove_top_network function."""
-
- basecfg = [
- {
- "type": "physical",
- "name": "interface0",
- "subnets": [{"type": "dhcp"}],
- }
- ]
-
- def test_should_remove_safely(self):
- mcfg = {"config": self.basecfg, "version": 1}
- self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
-
- def test_no_remove_if_other_keys(self):
- """should not shift if other keys at top level."""
- mcfg = {
- "network": {"config": self.basecfg, "version": 1},
- "unknown_keyname": "keyval",
- }
- self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
-
- def test_no_remove_if_non_dict(self):
- """should not shift if not a dict."""
- mcfg = {"network": '"content here'}
- self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
-
- def test_no_remove_if_missing_config_or_version(self):
- """should not shift unless network entry has config and version."""
- mcfg = {"network": {"config": self.basecfg}}
- self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
-
- mcfg = {"network": {"version": 1}}
- self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
-
- def test_remove_with_config_disabled(self):
- """network/config=disabled should be shifted."""
- mcfg = {"config": "disabled"}
- self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py
index b3e6f10c..b7b16952 100644
--- a/tests/unittests/sources/test_oracle.py
+++ b/tests/unittests/sources/test_oracle.py
@@ -920,14 +920,12 @@ class TestNetworkConfig:
assert network_config == m_read_initramfs_config.return_value
assert "Failed to parse secondary network configuration" in caplog.text
- def test_ds_network_cfg_order(self, _m):
- """Ensure that DS net config is preferred over initramfs config
- but less than system config."""
+ def test_ds_network_cfg_preferred_over_initramfs(self, _m):
+ """Ensure that DS net config is preferred over initramfs config"""
config_sources = oracle.DataSourceOracle.network_config_sources
- system_idx = config_sources.index(NetworkConfigSource.system_cfg)
- ds_idx = config_sources.index(NetworkConfigSource.ds)
- initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
- assert system_idx < ds_idx < initramfs_idx
+ ds_idx = config_sources.index(NetworkConfigSource.DS)
+ initramfs_idx = config_sources.index(NetworkConfigSource.INITRAMFS)
+ assert ds_idx < initramfs_idx
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
index d7e8b969..52bcbc17 100644
--- a/tests/unittests/sources/test_scaleway.py
+++ b/tests/unittests/sources/test_scaleway.py
@@ -444,8 +444,14 @@ class TestDataSourceScaleway(HttprettyTestCase):
{
"type": "static",
"address": "2000:abc:4444:9876::42:999",
- "gateway": "2000:abc:4444:9876::42:000",
"netmask": "127",
+ "routes": [
+ {
+ "gateway": "2000:abc:4444:9876::42:000",
+ "network": "::",
+ "prefix": "0",
+ }
+ ],
},
],
}
diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py
index dd331349..3579041a 100644
--- a/tests/unittests/sources/test_vmware.py
+++ b/tests/unittests/sources/test_vmware.py
@@ -7,6 +7,7 @@
import base64
import gzip
import os
+from contextlib import ExitStack
import pytest
@@ -59,13 +60,26 @@ runcmd:
@pytest.fixture(autouse=True)
def common_patches():
- with mock.patch("cloudinit.util.platform.platform", return_value="Linux"):
- with mock.patch.multiple(
+ mocks = [
+ mock.patch("cloudinit.util.platform.platform", return_value="Linux"),
+ mock.patch.multiple(
"cloudinit.dmi",
is_container=mock.Mock(return_value=False),
is_FreeBSD=mock.Mock(return_value=False),
- ):
- yield
+ ),
+ mock.patch(
+ "cloudinit.sources.DataSourceVMware.netifaces.interfaces",
+ return_value=[],
+ ),
+ mock.patch(
+ "cloudinit.sources.DataSourceVMware.getfqdn",
+ return_value="host.cloudinit.test",
+ ),
+ ]
+ with ExitStack() as stack:
+ for some_mock in mocks:
+ stack.enter_context(some_mock)
+ yield
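+ # contextlib.ExitStack lets a variable-length list of context managers
+ # share one with-block: enter_context() both enters each patch and
+ # registers its teardown, so all mocks unwind in reverse order when the
+ # fixture's yield returns.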
class TestDataSourceVMware(CiTestCase):
@@ -83,13 +97,49 @@ class TestDataSourceVMware(CiTestCase):
ret = ds.get_data()
self.assertFalse(ret)
- def test_get_host_info(self):
+ @mock.patch("cloudinit.sources.DataSourceVMware.get_default_ip_addrs")
+ def test_get_host_info_ipv4(self, m_fn_ipaddr):
+ m_fn_ipaddr.return_value = ("10.10.10.1", None)
host_info = DataSourceVMware.get_host_info()
self.assertTrue(host_info)
self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["hostname"] == "host.cloudinit.test")
self.assertTrue(host_info["local-hostname"])
self.assertTrue(host_info["local_hostname"])
self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4] == "10.10.10.1")
+ self.assertFalse(host_info.get(DataSourceVMware.LOCAL_IPV6))
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.get_default_ip_addrs")
+ def test_get_host_info_ipv6(self, m_fn_ipaddr):
+ m_fn_ipaddr.return_value = (None, "2001:db8::::::8888")
+ host_info = DataSourceVMware.get_host_info()
+ self.assertTrue(host_info)
+ self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["hostname"] == "host.cloudinit.test")
+ self.assertTrue(host_info["local-hostname"])
+ self.assertTrue(host_info["local_hostname"])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV6])
+ self.assertTrue(
+ host_info[DataSourceVMware.LOCAL_IPV6] == "2001:db8::::::8888"
+ )
+ self.assertFalse(host_info.get(DataSourceVMware.LOCAL_IPV4))
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.get_default_ip_addrs")
+ def test_get_host_info_dual(self, m_fn_ipaddr):
+ m_fn_ipaddr.return_value = ("10.10.10.1", "2001:db8::::::8888")
+ host_info = DataSourceVMware.get_host_info()
+ self.assertTrue(host_info)
+ self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["hostname"] == "host.cloudinit.test")
+ self.assertTrue(host_info["local-hostname"])
+ self.assertTrue(host_info["local_hostname"])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4] == "10.10.10.1")
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV6])
+ self.assertTrue(
+ host_info[DataSourceVMware.LOCAL_IPV6] == "2001:db8::::::8888"
+ )
class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase):
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
index 18b2c084..c8398579 100644
--- a/tests/unittests/sources/test_vultr.py
+++ b/tests/unittests/sources/test_vultr.py
@@ -141,13 +141,11 @@ VULTR_V1_2 = {
SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"]
-INTERFACES = [
- ["lo", "56:00:03:15:c4:00", "drv", "devid0"],
- ["dummy0", "56:00:03:15:c4:01", "drv", "devid1"],
- ["eth1", "56:00:03:15:c4:02", "drv", "devid2"],
- ["eth0", "56:00:03:15:c4:04", "drv", "devid4"],
- ["eth2", "56:00:03:15:c4:03", "drv", "devid3"],
-]
+INTERFACES = ["lo", "dummy0", "eth1", "eth0", "eth2"]
+
+ORDERED_INTERFACES = ["eth0", "eth1", "eth2"]
+
+FILTERED_INTERFACES = ["eth1", "eth2", "eth0"]
# Expected generated objects
@@ -167,7 +165,10 @@ EXPECTED_VULTR_CONFIG = {
EXPECTED_VULTR_NETWORK_1 = {
"version": 1,
"config": [
- {"type": "nameserver", "address": ["108.61.10.10"]},
+ {
+ "type": "nameserver",
+ "address": ["108.61.10.10", "2001:19f0:300:1704::6"],
+ },
{
"name": "eth0",
"type": "physical",
@@ -184,7 +185,10 @@ EXPECTED_VULTR_NETWORK_1 = {
EXPECTED_VULTR_NETWORK_2 = {
"version": 1,
"config": [
- {"type": "nameserver", "address": ["108.61.10.10"]},
+ {
+ "type": "nameserver",
+ "address": ["108.61.10.10", "2001:19f0:300:1704::6"],
+ },
{
"name": "eth0",
"type": "physical",
@@ -224,7 +228,15 @@ INTERFACE_MAP = {
}
-EPHERMERAL_USED = ""
+FINAL_INTERFACE_USED = ""
+
+
+# Static override; module-level because pylint dislikes methods
+# defined without self.
+def check_route(url):
+    return FINAL_INTERFACE_USED == "eth0"
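+# The ephemeral_init/override_* functions in the class below are patched in
+# place of EphemeralDHCPv4 methods; each records the interface it was handed
+# in the FINAL_INTERFACE_USED global so tests can assert which interface the
+# datasource finally settled on.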
class TestDataSourceVultr(CiTestCase):
@@ -297,32 +309,87 @@ class TestDataSourceVultr(CiTestCase):
@mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_private_network_config(self, mock_netmap):
mock_netmap.return_value = INTERFACE_MAP
- interf = VULTR_V1_2["interfaces"]
+ interf = VULTR_V1_2["interfaces"].copy()
+ # Test configuring
self.assertEqual(
EXPECTED_VULTR_NETWORK_2, vultr.generate_network_config(interf)
)
+ # Test unconfigured
+ interf[1]["unconfigured"] = True
+ expected = EXPECTED_VULTR_NETWORK_2.copy()
+ expected["config"].pop(2)
+ self.assertEqual(expected, vultr.generate_network_config(interf))
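+ # Caveat: .copy() is shallow, so pop(2) above also mutates the shared
+ # EXPECTED_VULTR_NETWORK_2["config"] list; use copy.deepcopy if other
+ # tests need the constant intact afterwards.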
+
+ # Override ephemeral for proper unit testing
def ephemeral_init(self, iface="", connectivity_url_data=None):
- global EPHERMERAL_USED
- EPHERMERAL_USED = iface
+ global FINAL_INTERFACE_USED
+ FINAL_INTERFACE_USED = iface
if iface == "eth0":
return
raise NoDHCPLeaseError("Generic for testing")
+ # Override ephemeral for proper unit testing
+ def ephemeral_init_always(self, iface="", connectivity_url_data=None):
+ global FINAL_INTERFACE_USED
+ FINAL_INTERFACE_USED = iface
+
+ # Override ephemeral for proper unit testing
+ def override_enter(self):
+ return
+
+ # Override ephemeral for proper unit testing
+ def override_exit(self, excp_type, excp_value, excp_traceback):
+ return
+
# Test interface seeking to ensure we are able to find the correct one
@mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__init__", ephemeral_init)
+ @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__enter__", override_enter)
+ @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__exit__", override_exit)
+ @mock.patch("cloudinit.sources.helpers.vultr.check_route")
@mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
@mock.patch("cloudinit.sources.helpers.vultr.read_metadata")
- @mock.patch("cloudinit.net.get_interfaces")
+ @mock.patch("cloudinit.sources.helpers.vultr.get_interface_list")
def test_interface_seek(
- self, mock_get_interfaces, mock_read_metadata, mock_isvultr
+ self,
+ mock_interface_list,
+ mock_read_metadata,
+ mock_isvultr,
+ mock_check_route,
):
- mock_read_metadata.side_effect = NoDHCPLeaseError(
- "Generic for testing"
+ mock_read_metadata.return_value = {}
+ mock_isvultr.return_value = True
+ mock_interface_list.return_value = FILTERED_INTERFACES
+ mock_check_route.return_value = True
+
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
)
+
+ try:
+ source._get_data()
+ except Exception:
+ pass
+
+ self.assertEqual(FINAL_INTERFACE_USED, INTERFACES[3])
+
+ # Test route checking on successful DHCP leases
+ @mock.patch("cloudinit.sources.helpers.vultr.check_route", check_route)
+ @mock.patch(
+ "cloudinit.net.dhcp.EphemeralDHCPv4.__init__", ephemeral_init_always
+ )
+ @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__enter__", override_enter)
+ @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__exit__", override_exit)
+ @mock.patch("cloudinit.sources.helpers.vultr.get_interface_list")
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.read_metadata")
+ def test_interface_seek_route_check(
+ self, mock_read_metadata, mock_isvultr, mock_interface_list
+ ):
+ mock_read_metadata.return_value = {}
+ mock_interface_list.return_value = FILTERED_INTERFACES
mock_isvultr.return_value = True
- mock_get_interfaces.return_value = INTERFACES
source = DataSourceVultr.DataSourceVultr(
settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
@@ -333,7 +400,7 @@ class TestDataSourceVultr(CiTestCase):
except Exception:
pass
- self.assertEqual(EPHERMERAL_USED, INTERFACES[3][0])
+ self.assertEqual(FINAL_INTERFACE_USED, INTERFACES[3])
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py
index fc63bcae..43677fce 100644
--- a/tests/unittests/sources/vmware/test_guestcust_util.py
+++ b/tests/unittests/sources/vmware/test_guestcust_util.py
@@ -12,6 +12,7 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
get_tools_config,
set_gc_status,
)
+from cloudinit.subp import SubpResult
from tests.unittests.helpers import CiTestCase, mock
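+# SubpResult (from cloudinit.subp) appears throughout this diff as a named
+# tuple of (stdout, stderr): SubpResult("key=value", b"") unpacks exactly
+# like the bare ("key=value", b"") tuples it replaces, so index/unpack
+# consumers keep working while gaining attribute access.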
@@ -35,7 +36,7 @@ class TestGuestCustUtil(CiTestCase):
with mock.patch.object(
subp,
"subp",
- return_value=("key=value", b""),
+ return_value=SubpResult("key=value", b""),
side_effect=subp.ProcessExecutionError(
"subp failed", exit_code=99
),
@@ -54,19 +55,21 @@ class TestGuestCustUtil(CiTestCase):
with mock.patch.object(subp, "which", return_value="/dummy/path"):
# value is not blank
with mock.patch.object(
- subp, "subp", return_value=("key = value ", b"")
+ subp, "subp", return_value=SubpResult("key = value ", b"")
):
self.assertEqual(
get_tools_config("section", "key", "defaultVal"), "value"
)
# value is blank
- with mock.patch.object(subp, "subp", return_value=("key = ", b"")):
+ with mock.patch.object(
+ subp, "subp", return_value=SubpResult("key = ", b"")
+ ):
self.assertEqual(
get_tools_config("section", "key", "defaultVal"), ""
)
# value contains =
with mock.patch.object(
- subp, "subp", return_value=("key=Bar=Wark", b"")
+ subp, "subp", return_value=SubpResult("key=Bar=Wark", b"")
):
self.assertEqual(
get_tools_config("section", "key", "defaultVal"),
@@ -75,7 +78,7 @@ class TestGuestCustUtil(CiTestCase):
# value contains specific characters
with mock.patch.object(
- subp, "subp", return_value=("[a] b.c_d=e-f", b"")
+ subp, "subp", return_value=SubpResult("[a] b.c_d=e-f", b"")
):
self.assertEqual(
get_tools_config("section", "key", "defaultVal"), "e-f"
@@ -97,7 +100,7 @@ class TestGuestCustUtil(CiTestCase):
cf._insertKey("MISC|POST-GC-STATUS", "YES")
conf = Config(cf)
with mock.patch.object(
- subp, "subp", return_value=("ok", b"")
+ subp, "subp", return_value=SubpResult("ok", b"")
) as mockobj:
self.assertEqual(set_gc_status(conf, "Successful"), ("ok", b""))
mockobj.assert_called_once_with(
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 0dae924d..4f9eeb65 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -5,13 +5,11 @@
import copy
import errno
import os
-import shutil
-import tempfile
from textwrap import dedent
import pytest
-from cloudinit import handlers, helpers, subp, util
+from cloudinit import handlers, helpers, util
from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.handlers.cloud_config import CloudConfigPartHandler
from cloudinit.handlers.jinja_template import (
@@ -24,83 +22,12 @@ from cloudinit.handlers.shell_script_by_frequency import (
get_script_folder_by_frequency,
path_map,
)
-from cloudinit.handlers.upstart_job import UpstartJobPartHandler
from cloudinit.settings import PER_ALWAYS, PER_INSTANCE, PER_ONCE
-from tests.unittests.helpers import (
- CiTestCase,
- FilesystemMockingTestCase,
- mock,
- skipUnlessJinja,
-)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja
INSTANCE_DATA_FILE = "instance-data-sensitive.json"
-class TestUpstartJobPartHandler(FilesystemMockingTestCase):
-
- mpath = "cloudinit.handlers.upstart_job."
-
- def test_upstart_frequency_no_out(self):
- c_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, c_root)
- up_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, up_root)
- paths = helpers.Paths(
- {
- "cloud_dir": c_root,
- "upstart_dir": up_root,
- }
- )
- h = UpstartJobPartHandler(paths)
- # No files should be written out when
- # the frequency is ! per-instance
- h.handle_part("", handlers.CONTENT_START, None, None, None)
- h.handle_part(
- "blah",
- "text/upstart-job",
- "test.conf",
- "blah",
- frequency=PER_ALWAYS,
- )
- h.handle_part("", handlers.CONTENT_END, None, None, None)
- self.assertEqual(0, len(os.listdir(up_root)))
-
- def test_upstart_frequency_single(self):
- # files should be written out when frequency is ! per-instance
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
-
- self.patchOS(new_root)
- self.patchUtils(new_root)
- paths = helpers.Paths(
- {
- "upstart_dir": "/etc/upstart",
- }
- )
-
- util.ensure_dir("/run")
- util.ensure_dir("/etc/upstart")
-
- with mock.patch(self.mpath + "SUITABLE_UPSTART", return_value=True):
- with mock.patch.object(subp, "subp") as m_subp:
- h = UpstartJobPartHandler(paths)
- h.handle_part("", handlers.CONTENT_START, None, None, None)
- h.handle_part(
- "blah",
- "text/upstart-job",
- "test.conf",
- "blah",
- frequency=PER_INSTANCE,
- )
- h.handle_part("", handlers.CONTENT_END, None, None, None)
-
- self.assertEqual(len(os.listdir("/etc/upstart")), 1)
-
- m_subp.assert_called_once_with(
- ["initctl", "reload-configuration"], capture=False
- )
-
-
class TestJinjaTemplatePartHandler(CiTestCase):
with_logs = True
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index bed73a93..7846d0d3 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -134,6 +134,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
"init",
"modules",
"single",
+ "schema",
]
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
@@ -169,6 +170,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
"usage: cloud-init collect-logs",
"usage: cloud-init devel",
"usage: cloud-init status",
+ "usage: cloud-init schema",
]
conditional_subcommands = [
"analyze",
@@ -176,6 +178,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
"collect-logs",
"devel",
"status",
+ "schema",
]
# The cloud-init entrypoint calls main without passing sys_argv
for subcommand in conditional_subcommands:
@@ -220,18 +223,18 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self._call_main(["cloud-init", "status", "-h"])
self.assertIn("usage: cloud-init status", stdout.getvalue())
- def test_devel_subcommand_parser(self):
- """The subcommand cloud-init devel calls the correct subparser."""
- self._call_main(["cloud-init", "devel"])
+ def test_subcommand_parser(self):
+ """The subcommand cloud-init schema calls the correct subparser."""
+ self._call_main(["cloud-init"])
# These subcommands are only valid for the cloud-init schema script
expected_subcommands = ["schema"]
error = self.stderr.getvalue()
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
- def test_wb_devel_schema_subcommand_parser(self):
+ def test_wb_schema_subcommand_parser(self):
"""The subcommand cloud-init schema calls the correct subparser."""
- exit_code = self._call_main(["cloud-init", "devel", "schema"])
+ exit_code = self._call_main(["cloud-init", "schema"])
self.assertEqual(1, exit_code)
# Known whitebox output from schema subcommand
self.assertEqual(
@@ -240,7 +243,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self.stderr.getvalue(),
)
- def test_wb_devel_schema_subcommand_doc_all_spot_check(self):
+ def test_wb_schema_subcommand_doc_all_spot_check(self):
"""Validate that doc content has correct values from known examples.
Ensure that schema doc is returned
@@ -252,7 +255,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
# manager
stdout = io.StringIO()
with contextlib.redirect_stdout(stdout):
- self._call_main(["cloud-init", "devel", "schema", "--docs", "all"])
+ self._call_main(["cloud-init", "schema", "--docs", "all"])
expected_doc_sections = [
"**Supported distros:** all",
"**Supported distros:** almalinux, alpine, centos, "
@@ -260,14 +263,14 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
"openEuler, opensuse, photon, rhel, rocky, sles, ubuntu, "
"virtuozzo",
"**Config schema**:\n **resize_rootfs:** "
- "(true/false/noblock)",
+ "(``true``/``false``/``noblock``)",
"**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n",
]
stdout = stdout.getvalue()
for expected in expected_doc_sections:
self.assertIn(expected, stdout)
- def test_wb_devel_schema_subcommand_single_spot_check(self):
+ def test_wb_schema_subcommand_single_spot_check(self):
"""Validate that doc content has correct values from known example.
Validate 'all' arg
@@ -279,9 +282,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
# manager
stdout = io.StringIO()
with contextlib.redirect_stdout(stdout):
- self._call_main(
- ["cloud-init", "devel", "schema", "--docs", "cc_runcmd"]
- )
+ self._call_main(["cloud-init", "schema", "--docs", "cc_runcmd"])
expected_doc_sections = [
"Runcmd\n------\n**Summary:** Run arbitrary commands"
]
@@ -289,7 +290,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
for expected in expected_doc_sections:
self.assertIn(expected, stdout)
- def test_wb_devel_schema_subcommand_multiple_spot_check(self):
+ def test_wb_schema_subcommand_multiple_spot_check(self):
"""Validate that doc content has correct values from known example.
Validate single arg
@@ -300,7 +301,6 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self._call_main(
[
"cloud-init",
- "devel",
"schema",
"--docs",
"cc_runcmd",
@@ -315,7 +315,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
for expected in expected_doc_sections:
self.assertIn(expected, stdout)
- def test_wb_devel_schema_subcommand_bad_arg_fails(self):
+ def test_wb_schema_subcommand_bad_arg_fails(self):
"""Validate that doc content has correct values from known example.
Validate multiple args
@@ -328,7 +328,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
stderr = io.StringIO()
with contextlib.redirect_stderr(stderr):
self._call_main(
- ["cloud-init", "devel", "schema", "--docs", "garbage_value"]
+ ["cloud-init", "schema", "--docs", "garbage_value"]
)
expected_doc_sections = ["Invalid --docs value"]
stderr = stderr.getvalue()
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index a5018a42..75c304a8 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -19,6 +19,7 @@ from cloudinit import helpers as c_helpers
from cloudinit import log, safeyaml, sources, stages
from cloudinit import user_data as ud
from cloudinit import util
+from cloudinit.config.modules import Modules
from cloudinit.settings import PER_INSTANCE
from tests.unittests import helpers
@@ -141,7 +142,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
self.assertIn("vendor_data", cfg)
@@ -187,7 +188,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
self.assertEqual("qux", cfg["baz"])
@@ -276,7 +277,7 @@ run:
#cloud-config
a: c
vendor_data:
- enabled: True
+ enabled: true
prefix: /bin/true
name: user
run:
@@ -296,7 +297,7 @@ run:
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
self.assertIn("vendor_data", cfg)
@@ -319,7 +320,7 @@ echo "dynamic test"
user_blob = """
#cloud-config
vendor_data:
- enabled: True
+ enabled: true
prefix: /bin/true
"""
new_root = self.reRoot()
@@ -338,7 +339,7 @@ vendor_data:
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(_which_ran, _failures) = mods.run_section("cloud_init_modules")
vendor_script = initer.paths.get_ipath_cur("vendor_scripts")
vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
@@ -634,7 +635,7 @@ c: 4
args=[PER_INSTANCE],
freq=PER_INSTANCE,
)
- mods = stages.Modules(initer)
+ mods = Modules(initer)
(_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
self.assertIn("vendor_data", cfg)
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 0b0de395..11048750 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -3,6 +3,7 @@
import copy
import os
from collections import namedtuple
+from textwrap import dedent
from uuid import uuid4
from cloudinit import safeyaml, subp, util
@@ -77,6 +78,7 @@ RC_FOUND = 0
RC_NOT_FOUND = 1
DS_NONE = "None"
+P_BOARD_NAME = "sys/class/dmi/id/board_name"
P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag"
P_PRODUCT_NAME = "sys/class/dmi/id/product_name"
P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial"
@@ -94,6 +96,10 @@ MOCK_VIRT_IS_CONTAINER_OTHER = {
}
MOCK_NOT_LXD_DATASOURCE = {"name": "dscheck_LXD", "ret": 1}
MOCK_VIRT_IS_KVM = {"name": "detect_virt", "RET": "kvm", "ret": 0}
+# qemu support for LXD applies only to host systems with a kernel > 5.10, as
+# lxd passes `hv_passthrough`, which causes systemd < v251 to misinterpret
+# the CPU as "qemu" instead of "kvm"
+MOCK_VIRT_IS_KVM_QEMU = {"name": "detect_virt", "RET": "qemu", "ret": 0}
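+# Illustrative sketch (assumed names, not the real ds-identify shell code) of
+# the branch these mocks exercise: accept "qemu" as well as "kvm" whenever
+# DMI reports an LXD board:
+#
+#     def is_lxd_vm(virt_type: str, board_name: str) -> bool:
+#         return board_name.strip() == "LXD" and virt_type in ("kvm", "qemu")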
MOCK_VIRT_IS_VMWARE = {"name": "detect_virt", "RET": "vmware", "ret": 0}
# currently SmartOS hypervisor "bhyve" is unknown by systemd-detect-virt.
MOCK_VIRT_IS_VM_OTHER = {"name": "detect_virt", "RET": "vm-other", "ret": 0}
@@ -101,8 +107,6 @@ MOCK_VIRT_IS_XEN = {"name": "detect_virt", "RET": "xen", "ret": 0}
MOCK_UNAME_IS_PPC64 = {"name": "uname", "out": UNAME_PPC64EL, "ret": 0}
MOCK_UNAME_IS_FREEBSD = {"name": "uname", "out": UNAME_FREEBSD, "ret": 0}
-DEFAULT_MOCKS = [MOCK_NOT_LXD_DATASOURCE]
-
shell_true = 0
shell_false = 1
@@ -119,6 +123,7 @@ class DsIdentifyBase(CiTestCase):
self,
rootd=None,
mocks=None,
+ no_mocks=None,
func="main",
args=None,
files=None,
@@ -159,13 +164,14 @@ class DsIdentifyBase(CiTestCase):
def write_mock(data):
ddata = {"out": None, "err": None, "ret": 0, "RET": None}
ddata.update(data)
- for k in ddata:
+ for k in ddata.keys():
if ddata[k] is None:
ddata[k] = unset
return SHELL_MOCK_TMPL % ddata
mocklines = []
- defaults = [
+ default_mocks = [
+ MOCK_NOT_LXD_DATASOURCE,
{"name": "detect_virt", "RET": "none", "ret": 1},
{"name": "uname", "out": UNAME_MYSYS},
{"name": "blkid", "out": BLKID_EFI_ROOT},
@@ -189,7 +195,9 @@ class DsIdentifyBase(CiTestCase):
written = [d["name"] for d in mocks]
for data in mocks:
mocklines.append(write_mock(data))
- for d in defaults:
+ for d in default_mocks:
+ if no_mocks and d["name"] in no_mocks:
+ continue
if d["name"] not in written:
mocklines.append(write_mock(d))
@@ -221,6 +229,7 @@ class DsIdentifyBase(CiTestCase):
# return output of self.call with a dict input like VALID_CFG[item]
xwargs = {"rootd": rootd}
passthrough = (
+ "no_mocks", # named mocks to ignore
"mocks",
"func",
"args",
@@ -233,14 +242,6 @@ class DsIdentifyBase(CiTestCase):
xwargs[k] = data[k]
if k in kwargs:
xwargs[k] = kwargs[k]
- if "mocks" not in xwargs:
- xwargs["mocks"] = DEFAULT_MOCKS
- else:
- mocked_funcs = [m["name"] for m in xwargs["mocks"]]
- for default_mock in DEFAULT_MOCKS:
- if default_mock["name"] not in mocked_funcs:
- xwargs["mocks"].append(default_mock)
-
return self.call(**xwargs)
def _test_ds_found(self, name):
@@ -338,6 +339,28 @@ class TestDsIdentify(DsIdentifyBase):
"""Older gce compute instances must be identified by serial."""
self._test_ds_found("GCE-serial")
+ def test_lxd_kvm(self):
+ """LXD KVM has race on absent /dev/lxd/socket. Use DMI board_name."""
+ self._test_ds_found("LXD-kvm")
+
+ def test_lxd_kvm_jammy(self):
+ """LXD KVM on host systems with a kernel > 5.10 need to match "qemu".
+ LXD provides `hv_passthrough` when launching kvm instances when host
+ kernel is > 5.10. This results in systemd being unable to detect the
+ virtualized CPUID="Linux KVM Hv" as type "kvm" and results in
+ systemd-detect-virt returning "qemu" in this case.
+
+ Assert ds-identify can match systemd-detect-virt="qemu" and
+ /sys/class/dmi/id/board_name = LXD.
+ Once systemd 251 is available on a target distro, the virtualized
+ CPUID will be represented properly as "kvm"
+ """
+ self._test_ds_found("LXD-kvm-qemu-kernel-gt-5.10")
+
+ def test_lxd_containers(self):
+ """LXD containers will have /dev/lxd/socket at generator time."""
+ self._test_ds_found("LXD")
+
def test_config_drive(self):
"""ConfigDrive datasource has a disk with LABEL=config-2."""
self._test_ds_found("ConfigDrive")
@@ -710,6 +733,10 @@ class TestDsIdentify(DsIdentifyBase):
"""NoCloud is found with uppercase filesystem label."""
self._test_ds_found("NoCloudUpper")
+ def test_nocloud_seed_in_cfg(self):
+ """NoCloud seed definition can go in /etc/cloud/cloud.cfg[.d]"""
+ self._test_ds_found("NoCloud-cfg")
+
def test_nocloud_fatboot(self):
"""NoCloud fatboot label - LP: #184166."""
self._test_ds_found("NoCloud-fatboot")
@@ -1020,6 +1047,26 @@ VALID_CFG = {
"files": {P_PRODUCT_SERIAL: "GoogleCloud-8f2e88f\n"},
"mocks": [MOCK_VIRT_IS_KVM],
},
+ "LXD-kvm": {
+ "ds": "LXD",
+ "files": {P_BOARD_NAME: "LXD\n"},
+ # /dev/lxd/sock does not exist and KVM virt-type
+ "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM],
+ "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD
+ },
+ "LXD-kvm-qemu-kernel-gt-5.10": { # LXD host > 5.10 kvm launch virt==qemu
+ "ds": "LXD",
+ "files": {P_BOARD_NAME: "LXD\n"},
+ # /dev/lxd/sock does not exist and KVM virt-type
+ "mocks": [{"name": "is_socket_file", "ret": 1}, MOCK_VIRT_IS_KVM_QEMU],
+ "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD
+ },
+ "LXD": {
+ "ds": "LXD",
+ # /dev/lxd/sock exists
+ "mocks": [{"name": "is_socket_file", "ret": 0}],
+ "no_mocks": ["dscheck_LXD"], # Don't default mock dscheck_LXD
+ },
"NoCloud": {
"ds": "NoCloud",
"mocks": [
@@ -1043,6 +1090,26 @@ VALID_CFG = {
"dev/vdb": "pretend iso content for cidata\n",
},
},
+ "NoCloud-cfg": {
+ "ds": "NoCloud",
+ "files": {
+ # Also include a datasource list of more than just
+ # [NoCloud, None], because that would automatically select
+ # NoCloud without checking
+ "/etc/cloud/cloud.cfg": dedent(
+ """\
+ datasource_list: [ Azure, Openstack, NoCloud, None ]
+ datasource:
+ NoCloud:
+ user-data: |
+ #cloud-config
+ hostname: footbar
+ meta-data: |
+ instance_id: cloud-image
+ """
+ )
+ },
+ },
"NoCloud-fbsd": {
"ds": "NoCloud",
"mocks": [
diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py
index c3772e3f..caa2aeb4 100644
--- a/tests/unittests/test_gpg.py
+++ b/tests/unittests/test_gpg.py
@@ -3,6 +3,7 @@ from unittest import mock
import pytest
from cloudinit import gpg, subp
+from cloudinit.subp import SubpResult
from tests.unittests.helpers import CiTestCase
TEST_KEY_HUMAN = """
@@ -64,7 +65,9 @@ class TestGPGCommands:
"--with-colons",
"key",
]
- with mock.patch.object(subp, "subp", return_value=("", "")) as m_subp:
+ with mock.patch.object(
+ subp, "subp", return_value=SubpResult("", "")
+ ) as m_subp:
gpg.list("key")
assert mock.call(colons, capture=True) == m_subp.call_args
@@ -74,7 +77,9 @@ class TestGPGCommands:
def test_gpg_dearmor_args(self):
"""Verify correct command gets called to dearmor keys"""
- with mock.patch.object(subp, "subp", return_value=("", "")) as m_subp:
+ with mock.patch.object(
+ subp, "subp", return_value=SubpResult("", "")
+ ) as m_subp:
gpg.dearmor("key")
test_call = mock.call(
["gpg", "--dearmor"], data="key", decode=False
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index cf484dda..dfa20cbb 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -41,7 +41,7 @@ def _old_mergemanydict(*args):
def _random_str(rand):
base = ""
- for _i in range(rand.randint(1, 2 ** 8)):
+ for _i in range(rand.randint(1, 2**8)):
base += rand.choice(string.ascii_letters + string.digits)
return base
@@ -81,7 +81,7 @@ def _make_dict(current_depth, max_depth, rand):
if t in [tuple]:
base = tuple(base)
elif t in [int]:
- base = rand.randint(0, 2 ** 8)
+ base = rand.randint(0, 2**8)
elif t in [str]:
base = _random_str(rand)
return base
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 47e4ba00..ecf33070 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -19,8 +19,10 @@ from cloudinit.net import (
cmdline,
eni,
interface_has_own_mac,
+ mask_and_ipv4_to_bcast_addr,
natural_sort_key,
netplan,
+ network_manager,
network_state,
networkd,
renderers,
@@ -612,6 +614,37 @@ dns = none
),
),
],
+ "expected_network_manager": [
+ (
+ "".join(
+ [
+ "etc/NetworkManager/system-connections",
+ "/cloud-init-eth0.nmconnection",
+ ]
+ ),
+ """
+# Generated by cloud-init. Changes will be lost.
+
+[connection]
+id=cloud-init eth0
+uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
+type=ethernet
+
+[user]
+org.freedesktop.NetworkManager.origin=cloud-init
+
+[ethernet]
+mac-address=FA:16:3E:ED:9A:59
+
+[ipv4]
+method=manual
+may-fail=false
+address1=172.19.1.34/22
+route1=0.0.0.0/0,172.19.3.254
+
+""".lstrip(),
+ ),
+ ],
},
{
"in_data": {
@@ -1078,6 +1111,50 @@ NETWORK_CONFIGS = {
USERCTL=no"""
),
},
+ "expected_network_manager": {
+ "cloud-init-eth1.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth1
+ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=CF:D6:AF:48:E8:80
+
+ """
+ ),
+ "cloud-init-eth99.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth99
+ uuid=b1b88000-1f03-5360-8377-1a2205efffb4
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=C0:D6:9F:2C:E8:80
+
+ [ipv4]
+ method=auto
+ may-fail=false
+ address1=192.168.21.3/24
+ route1=0.0.0.0/0,65.61.151.37
+ dns=8.8.8.8;8.8.4.4;
+ dns-search=barley.maas;sach.maas;
+
+ """
+ ),
+ },
"yaml": textwrap.dedent(
"""
version: 1
@@ -1150,6 +1227,34 @@ NETWORK_CONFIGS = {
STARTMODE=auto"""
)
},
+ "expected_network_manager": {
+ "cloud-init-iface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init iface0
+ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70
+ type=ethernet
+ interface-name=iface0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+
+ [ipv4]
+ method=auto
+ may-fail=false
+
+ [ipv6]
+ method=dhcp
+ may-fail=false
+ addr-gen-mode=stable-privacy
+
+ """
+ ),
+ },
"yaml": textwrap.dedent(
"""\
version: 1
@@ -1253,6 +1358,37 @@ NETWORK_CONFIGS = {
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-iface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init iface0
+ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70
+ type=ethernet
+ interface-name=iface0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mtu=9000
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=192.168.14.2/24
+
+ [ipv6]
+ method=manual
+ may-fail=false
+ addr-gen-mode=stable-privacy
+ address1=2001:1::1/64
+
+ """
+ ),
+ },
},
"v6_and_v4": {
"expected_sysconfig_opensuse": {
@@ -1263,6 +1399,34 @@ NETWORK_CONFIGS = {
STARTMODE=auto"""
)
},
+ "expected_network_manager": {
+ "cloud-init-iface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init iface0
+ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70
+ type=ethernet
+ interface-name=iface0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+
+ [ipv6]
+ method=dhcp
+ may-fail=false
+ addr-gen-mode=stable-privacy
+
+ [ipv4]
+ method=auto
+ may-fail=false
+
+ """
+ ),
+ },
"yaml": textwrap.dedent(
"""\
version: 1
@@ -1336,6 +1500,30 @@ NETWORK_CONFIGS = {
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-iface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init iface0
+ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70
+ type=ethernet
+ interface-name=iface0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+
+ [ipv6]
+ method=dhcp
+ may-fail=false
+ addr-gen-mode=stable-privacy
+
+ """
+ ),
+ },
},
"dhcpv6_accept_ra": {
"expected_eni": textwrap.dedent(
@@ -1543,6 +1731,30 @@ NETWORK_CONFIGS = {
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-iface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init iface0
+ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70
+ type=ethernet
+ interface-name=iface0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+
+ [ipv6]
+ method=auto
+ may-fail=false
+ addr-gen-mode=stable-privacy
+
+ """
+ ),
+ },
},
"static6": {
"yaml": textwrap.dedent(
@@ -1631,6 +1843,30 @@ NETWORK_CONFIGS = {
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-iface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init iface0
+ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70
+ type=ethernet
+ interface-name=iface0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+
+ [ipv6]
+ method=auto
+ may-fail=false
+ addr-gen-mode=stable-privacy
+
+ """
+ ),
+ },
},
"dhcpv6_stateful": {
"expected_eni": textwrap.dedent(
@@ -1730,6 +1966,29 @@ NETWORK_CONFIGS = {
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-iface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init iface0
+ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70
+ type=ethernet
+ interface-name=iface0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+
+ [ipv4]
+ method=auto
+ may-fail=false
+
+ """
+ ),
+ },
"yaml_v2": textwrap.dedent(
"""\
version: 2
@@ -1783,6 +2042,30 @@ NETWORK_CONFIGS = {
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-iface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init iface0
+ uuid=8ddfba48-857c-5e86-ac09-1b43eae0bf70
+ type=ethernet
+ interface-name=iface0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ wake-on-lan=64
+
+ [ipv4]
+ method=auto
+ may-fail=false
+
+ """
+ ),
+ },
"yaml_v2": textwrap.dedent(
"""\
version: 2
@@ -2231,6 +2514,254 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
USERCTL=no"""
),
},
+ "expected_network_manager": {
+ "cloud-init-eth3.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth3
+ uuid=b7e95dda-7746-5bf8-bf33-6e5f3c926790
+ type=ethernet
+ slave-type=bridge
+ master=dee46ce4-af7a-5e7c-aa08-b25533ae9213
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=66:BB:9F:2C:E8:80
+
+ """
+ ),
+ "cloud-init-eth5.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth5
+ uuid=5fda13c7-9942-5e90-a41b-1d043bd725dc
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=98:BB:9F:2C:E8:8A
+
+ [ipv4]
+ method=auto
+ may-fail=false
+
+ """
+ ),
+ "cloud-init-ib0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init ib0
+ uuid=11a1dda7-78b4-5529-beba-d9b5f549ad7b
+ type=infiniband
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [infiniband]
+ transport-mode=datagram
+ mtu=9000
+ mac-address=A0:00:02:20:FE:80:00:00:00:00:00:00:EC:0D:9A:03:00:15:E2:C1
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=192.168.200.7/24
+
+ """
+ ),
+ "cloud-init-bond0.200.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init bond0.200
+ uuid=88984a9c-ff22-5233-9267-86315e0acaa7
+ type=vlan
+ interface-name=bond0.200
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [vlan]
+ id=200
+ parent=54317911-f840-516b-a10d-82cb4c1f075c
+
+ [ipv4]
+ method=auto
+ may-fail=false
+
+ """
+ ),
+ "cloud-init-eth0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth0
+ uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=C0:D6:9F:2C:E8:80
+
+ """
+ ),
+ "cloud-init-eth4.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth4
+ uuid=e27e4959-fb50-5580-b9a4-2073554627b9
+ type=ethernet
+ slave-type=bridge
+ master=dee46ce4-af7a-5e7c-aa08-b25533ae9213
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=98:BB:9F:2C:E8:80
+
+ """
+ ),
+ "cloud-init-eth1.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth1
+ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58
+ type=ethernet
+ slave-type=bond
+ master=54317911-f840-516b-a10d-82cb4c1f075c
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=AA:D6:9F:2C:E8:80
+
+ """
+ ),
+ "cloud-init-br0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init br0
+ uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213
+ type=bridge
+ interface-name=br0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [bridge]
+ stp=false
+ priority=22
+ mac-address=BB:BB:BB:BB:BB:AA
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=192.168.14.2/24
+
+ [ipv6]
+ method=manual
+ may-fail=false
+ addr-gen-mode=stable-privacy
+ address1=2001:1::1/64
+ route1=::/0,2001:4800:78ff:1b::1
+
+ """
+ ),
+ "cloud-init-eth0.101.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth0.101
+ uuid=b5acec5e-db80-5935-8b02-0d5619fc42bf
+ type=vlan
+ interface-name=eth0.101
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [vlan]
+ id=101
+ parent=1dd9a779-d327-56e1-8454-c65e2556c12c
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=192.168.0.2/24
+ gateway=192.168.0.1
+ dns=192.168.0.10;10.23.23.134;
+ dns-search=barley.maas;sacchromyces.maas;brettanomyces.maas;
+ address2=192.168.2.10/24
+
+ """
+ ),
+ "cloud-init-bond0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init bond0
+ uuid=54317911-f840-516b-a10d-82cb4c1f075c
+ type=bond
+ interface-name=bond0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [bond]
+ mode=active-backup
+ miimon=100
+ xmit_hash_policy=layer3+4
+
+ [ipv6]
+ method=dhcp
+ may-fail=false
+ addr-gen-mode=stable-privacy
+
+ """
+ ),
+ "cloud-init-eth2.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth2
+ uuid=5559a242-3421-5fdd-896e-9cb8313d5804
+ type=ethernet
+ slave-type=bond
+ master=54317911-f840-516b-a10d-82cb4c1f075c
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=C0:BB:9F:2C:E8:80
+
+ """
+ ),
+ },
"yaml": textwrap.dedent(
"""
version: 1
@@ -2419,11 +2950,11 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
- type: static
address: 2001:1::1/92
routes:
- - gateway: 2001:67c:1562:1
- network: 2001:67c:1
+ - gateway: 2001:67c:1562::1
+ network: "2001:67c::"
netmask: "ffff:ffff::"
- - gateway: 3001:67c:1562:1
- network: 3001:67c:1
+ - gateway: 3001:67c:15::1
+ network: "3001:67c::"
netmask: "ffff:ffff::"
metric: 10000
"""
@@ -2466,11 +2997,11 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
routes:
- to: 10.1.3.0/24
via: 192.168.0.3
- - to: 2001:67c:1/32
- via: 2001:67c:1562:1
+ - to: 2001:67c::/32
+ via: 2001:67c:1562::1
- metric: 10000
- to: 3001:67c:1/32
- via: 3001:67c:1562:1
+ to: 3001:67c::/32
+ via: 3001:67c:15::1
"""
),
"expected_eni": textwrap.dedent(
@@ -2530,11 +3061,11 @@ iface bond0 inet static
# control-alias bond0
iface bond0 inet6 static
address 2001:1::1/92
- post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true
- pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true
- post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \
+ post-up route add -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true
+ pre-down route del -A inet6 2001:67c::/32 gw 2001:67c:1562::1 || true
+ post-up route add -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \
|| true
- pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \
+ pre-down route del -A inet6 3001:67c::/32 gw 3001:67c:15::1 metric 10000 \
|| true
"""
),
@@ -2577,8 +3108,8 @@ iface bond0 inet6 static
- to: 2001:67c:1562:8007::1/64
via: 2001:67c:1562:8007::aac:40b2
- metric: 10000
- to: 3001:67c:1562:8007::1/64
- via: 3001:67c:1562:8007::aac:40b2
+ to: 3001:67c:15:8007::1/64
+ via: 3001:67c:15:8007::aac:40b2
"""
),
"expected_netplan-v2": textwrap.dedent(
@@ -2610,8 +3141,8 @@ iface bond0 inet6 static
- to: 2001:67c:1562:8007::1/64
via: 2001:67c:1562:8007::aac:40b2
- metric: 10000
- to: 3001:67c:1562:8007::1/64
- via: 3001:67c:1562:8007::aac:40b2
+ to: 3001:67c:15:8007::1/64
+ via: 3001:67c:15:8007::aac:40b2
ethernets:
eth0:
match:
@@ -2712,8 +3243,8 @@ iface bond0 inet6 static
"""\
# Created by cloud-init on instance boot automatically, do not edit.
#
- 2001:67c:1/32 via 2001:67c:1562:1 dev bond0
- 3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0
+ 2001:67c::/32 via 2001:67c:1562::1 dev bond0
+ 3001:67c::/32 via 3001:67c:15::1 metric 10000 dev bond0
"""
),
"route-bond0": textwrap.dedent(
@@ -2737,6 +3268,88 @@ iface bond0 inet6 static
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-bond0s0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init bond0s0
+ uuid=09d0b5b9-67e7-5577-a1af-74d1cf17a71e
+ type=ethernet
+ slave-type=bond
+ master=54317911-f840-516b-a10d-82cb4c1f075c
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=AA:BB:CC:DD:E8:00
+
+ """
+ ),
+ "cloud-init-bond0s1.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init bond0s1
+ uuid=4d9aca96-b515-5630-ad83-d13daac7f9d0
+ type=ethernet
+ slave-type=bond
+ master=54317911-f840-516b-a10d-82cb4c1f075c
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=AA:BB:CC:DD:E8:01
+
+ """
+ ),
+ "cloud-init-bond0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init bond0
+ uuid=54317911-f840-516b-a10d-82cb4c1f075c
+ type=bond
+ interface-name=bond0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [bond]
+ mode=active-backup
+ miimon=100
+ xmit_hash_policy=layer3+4
+ num_grat_arp=5
+ downdelay=10
+ updelay=20
+ fail_over_mac=active
+ primary_reselect=always
+ primary=bond0s0
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=192.168.0.2/24
+ gateway=192.168.0.1
+ route1=10.1.3.0/24,192.168.0.3
+ address2=192.168.1.2/24
+
+ [ipv6]
+ method=manual
+ may-fail=false
+ addr-gen-mode=stable-privacy
+ address1=2001:1::1/92
+ route1=2001:67c::/32,2001:67c:1562::1
+ route2=3001:67c::/32,3001:67c:15::1
+
+ """
+ ),
+ },
},
"vlan": {
"yaml": textwrap.dedent(
@@ -2822,6 +3435,58 @@ iface bond0 inet6 static
VLAN=yes"""
),
},
+ "expected_network_manager": {
+ "cloud-init-en0.99.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init en0.99
+ uuid=f594e2ed-f107-51df-b225-1dc530a5356b
+ type=vlan
+ interface-name=en0.99
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [vlan]
+ id=99
+ parent=e0ca478b-8d84-52ab-8fae-628482c629b5
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=192.168.2.2/24
+ address2=192.168.1.2/24
+ gateway=192.168.1.1
+
+ [ipv6]
+ method=manual
+ may-fail=false
+ addr-gen-mode=stable-privacy
+ address1=2001:1::bbbb/96
+ route1=::/0,2001:1::1
+
+ """
+ ),
+ "cloud-init-en0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init en0
+ uuid=e0ca478b-8d84-52ab-8fae-628482c629b5
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=AA:BB:CC:DD:E8:00
+
+ """
+ ),
+ },
},
"bridge": {
"yaml": textwrap.dedent(
@@ -2931,6 +3596,82 @@ iface bond0 inet6 static
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-br0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init br0
+ uuid=dee46ce4-af7a-5e7c-aa08-b25533ae9213
+ type=bridge
+ interface-name=br0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [bridge]
+ stp=false
+ priority=22
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=192.168.2.2/24
+
+ """
+ ),
+ "cloud-init-eth0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth0
+ uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
+ type=ethernet
+ slave-type=bridge
+ master=dee46ce4-af7a-5e7c-aa08-b25533ae9213
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=52:54:00:12:34:00
+
+ [ipv6]
+ method=manual
+ may-fail=false
+ addr-gen-mode=stable-privacy
+ address1=2001:1::100/96
+
+ """
+ ),
+ "cloud-init-eth1.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth1
+ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58
+ type=ethernet
+ slave-type=bridge
+ master=dee46ce4-af7a-5e7c-aa08-b25533ae9213
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=52:54:00:12:34:01
+
+ [ipv6]
+ method=manual
+ may-fail=false
+ addr-gen-mode=stable-privacy
+ address1=2001:1::101/96
+
+ """
+ ),
+ },
},
"manual": {
"yaml": textwrap.dedent(
@@ -3062,6 +3803,96 @@ iface bond0 inet6 static
"""
),
},
+ "expected_network_manager": {
+ "cloud-init-eth0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth0
+ uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=52:54:00:12:34:00
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=192.168.1.2/24
+
+ """
+ ),
+ "cloud-init-eth1.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth1
+ uuid=3c50eb47-7260-5a6d-801d-bd4f587d6b58
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mtu=1480
+ mac-address=52:54:00:12:34:AA
+
+ [ipv4]
+ method=auto
+ may-fail=true
+
+ """
+ ),
+ "cloud-init-eth2.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth2
+ uuid=5559a242-3421-5fdd-896e-9cb8313d5804
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=52:54:00:12:34:FF
+
+ [ipv4]
+ method=auto
+ may-fail=true
+
+ """
+ ),
+ },
+ },
+ "v2-dev-name-via-mac-lookup": {
+ "expected_sysconfig_rhel": {
+ "ifcfg-eth0": textwrap.dedent(
+ """\
+ BOOTPROTO=none
+ DEVICE=eth0
+ HWADDR=cf:d6:af:48:e8:80
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no"""
+ ),
+ },
+ "yaml": textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ nic0:
+ match:
+ macaddress: 'cf:d6:af:48:e8:80'
+ """
+ ),
},
}
@@ -3146,6 +3977,9 @@ DEFAULT_DEV_ATTRS = {
"device/driver": None,
"device/device": None,
"name_assign_type": "4",
+ "addr_assign_type": "0",
+ "uevent": "",
+ "type": "32",
}
}
@@ -3272,16 +4106,22 @@ class TestGenerateFallbackConfig(CiTestCase):
"device/driver": "hv_netsvc",
"device/device": "0x3",
"name_assign_type": "4",
+ "addr_assign_type": "0",
+ "uevent": "",
+ "type": "32",
},
"eth1": {
"bridge": False,
"carrier": False,
"dormant": False,
"operstate": "down",
- "address": "00:11:22:33:44:55",
+ "address": "00:11:22:33:44:56",
"device/driver": "mlx4_core",
"device/device": "0x7",
"name_assign_type": "4",
+ "addr_assign_type": "0",
+ "uevent": "",
+ "type": "32",
},
}
@@ -3350,16 +4190,22 @@ iface eth0 inet dhcp
"device/driver": "hv_netsvc",
"device/device": "0x3",
"name_assign_type": "4",
+ "addr_assign_type": "0",
+ "uevent": "",
+ "type": "32",
},
"eth0": {
"bridge": False,
"carrier": False,
"dormant": False,
"operstate": "down",
- "address": "00:11:22:33:44:55",
+ "address": "00:11:22:33:44:56",
"device/driver": "mlx4_core",
"device/device": "0x7",
"name_assign_type": "4",
+ "addr_assign_type": "0",
+ "uevent": "",
+ "type": "32",
},
}
@@ -3522,7 +4368,6 @@ class TestRhelSysConfigRendering(CiTestCase):
with_logs = True
- nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf"
scripts_dir = "/etc/sysconfig/network-scripts"
header = (
"# Created by cloud-init on instance boot automatically, "
@@ -4100,78 +4945,6 @@ USERCTL=no
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
- def test_check_ifcfg_rh(self):
- """ifcfg-rh plugin is added NetworkManager.conf if conf present."""
- render_dir = self.tmp_dir()
- nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
- util.ensure_dir(os.path.dirname(nm_cfg))
-
- # write a template nm.conf, note plugins is a list here
- with open(nm_cfg, "w") as fh:
- fh.write("# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n")
- self.assertTrue(os.path.exists(nm_cfg))
-
- # render and read
- entry = NETWORK_CONFIGS["small"]
- found = self._render_and_read(
- network_config=yaml.load(entry["yaml"]), dir=render_dir
- )
- self._compare_files_to_expected(entry[self.expected_name], found)
- self._assert_headers(found)
-
- # check ifcfg-rh is in the 'plugins' list
- config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn("ifcfg-rh", config["main"]["plugins"])
-
- def test_check_ifcfg_rh_plugins_string(self):
- """ifcfg-rh plugin is append when plugins is a string."""
- render_dir = self.tmp_path("render")
- os.makedirs(render_dir)
- nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
- util.ensure_dir(os.path.dirname(nm_cfg))
-
- # write a template nm.conf, note plugins is a value here
- util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\nplugins=foo\n")
-
- # render and read
- entry = NETWORK_CONFIGS["small"]
- found = self._render_and_read(
- network_config=yaml.load(entry["yaml"]), dir=render_dir
- )
- self._compare_files_to_expected(entry[self.expected_name], found)
- self._assert_headers(found)
-
- # check raw content has plugin
- nm_file_content = util.load_file(nm_cfg)
- self.assertIn("ifcfg-rh", nm_file_content)
-
- # check ifcfg-rh is in the 'plugins' list
- config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn("ifcfg-rh", config["main"]["plugins"])
-
- def test_check_ifcfg_rh_plugins_no_plugins(self):
- """enable_ifcfg_plugin creates plugins value if missing."""
- render_dir = self.tmp_path("render")
- os.makedirs(render_dir)
- nm_cfg = subp.target_path(render_dir, path=self.nm_cfg_file)
- util.ensure_dir(os.path.dirname(nm_cfg))
-
- # write a template nm.conf, note plugins is missing
- util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\n")
- self.assertTrue(os.path.exists(nm_cfg))
-
- # render and read
- entry = NETWORK_CONFIGS["small"]
- found = self._render_and_read(
- network_config=yaml.load(entry["yaml"]), dir=render_dir
- )
- self._compare_files_to_expected(entry[self.expected_name], found)
- self._assert_headers(found)
-
- # check ifcfg-rh is in the 'plugins' list
- config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn("ifcfg-rh", config["main"]["plugins"])
-
def test_netplan_dhcp_false_disable_dhcp_in_state(self):
"""netplan config with dhcp[46]: False should not add dhcp in state"""
net_config = yaml.load(NETPLAN_DHCP_FALSE)
@@ -4364,6 +5137,147 @@ USERCTL=no
expected, self._render_and_read(network_config=v2data)
)
+ def test_from_v2_routes(self):
+ """verify routes (including IPv6) get rendered using v2 config.
+
+ LP: #1958506
+ """
+ v2_data = {
+ "version": 2,
+ "ethernets": {
+ "eth0": {
+ "addresses": [
+ "10.54.2.19/21",
+ "2a00:1730:fff9:100::52/128",
+ ],
+ "gateway4": "10.54.0.1",
+ "gateway6": "2a00:1730:fff9:100::1",
+ "match": {"macaddress": "52:54:00:3f:fc:f7"},
+ "mtu": 1400,
+ "nameservers": {
+ "addresses": [
+ "10.52.1.1",
+ "10.52.1.71",
+ "2001:4860:4860::8888",
+ "2001:4860:4860::8844",
+ ]
+ },
+ "routes": [
+ {
+ "scope": "link",
+ "to": "10.54.0.1/32",
+ "via": "0.0.0.0",
+ },
+ {
+ "scope": "link",
+ "to": "0.0.0.0/0",
+ "via": "10.54.0.1",
+ },
+ {
+ "scope": "link",
+ "to": "2a00:1730:fff9:100::1/128",
+ "via": "::0",
+ },
+ {
+ "scope": "link",
+ "to": "::0/0",
+ "via": "2a00:1730:fff9:100::1",
+ },
+ ],
+ "set-name": "eth0",
+ }
+ },
+ }
+
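+ # sysconfig splits the result: addresses and gateways land in
+ # ifcfg-eth0, IPv4 static routes in route-eth0, IPv6 routes in
+ # route6-eth0.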
+ expected = {
+ "ifcfg-eth0": textwrap.dedent(
+ """\
+ # Created by cloud-init on instance boot automatically, do not edit.
+ #
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=eth0
+ DNS1=10.52.1.1
+ DNS2=10.52.1.71
+ DNS3=2001:4860:4860::8888
+ GATEWAY=10.54.0.1
+ HWADDR=52:54:00:3f:fc:f7
+ IPADDR=10.54.2.19
+ IPV6ADDR=2a00:1730:fff9:100::52/128
+ IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_DEFAULTGW=2a00:1730:fff9:100::1
+ IPV6_FORCE_ACCEPT_RA=no
+ MTU=1400
+ NETMASK=255.255.248.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """ # noqa: E501
+ ),
+ "route-eth0": textwrap.dedent(
+ """\
+ # Created by cloud-init on instance boot automatically, do not edit.
+ #
+ ADDRESS0=10.54.0.1
+ GATEWAY0=0.0.0.0
+ NETMASK0=255.255.255.255
+ """ # noqa: E501
+ ),
+ "route6-eth0": textwrap.dedent(
+ """\
+ # Created by cloud-init on instance boot automatically, do not edit.
+ #
+ 2a00:1730:fff9:100::1/128 via ::0 dev eth0
+ ::0/64 via 2a00:1730:fff9:100::1 dev eth0
+ """ # noqa: E501
+ ),
+ }
+
+ found = self._render_and_read(network_config=v2_data)
+ self._compare_files_to_expected(expected, found)
+ self._assert_headers(found)
+
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_iface_name_from_device_with_matching_mac_address(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ ):
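+ # The v2 config names the NIC "nic0" but matches on MAC; the renderer
+ # should emit ifcfg-eth0 for the kernel device owning that MAC.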
+ devices = {
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "CF:D6:AF:48:E8:80",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": "4",
+ "addr_assign_type": "0",
+ "uevent": "",
+ "type": "32",
+ },
+ }
+
+ tmp_dir = self.tmp_dir()
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
+
+ entry = NETWORK_CONFIGS["v2-dev-name-via-mac-lookup"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
@mock.patch(
"cloudinit.net.is_openvswitch_internal_interface",
@@ -4727,6 +5641,281 @@ STARTMODE=auto
self._assert_headers(found)
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestNetworkManagerRendering(CiTestCase):
+
+ with_logs = True
+
+ scripts_dir = "/etc/NetworkManager/system-connections"
+
+ expected_name = "expected_network_manager"
+
+ def _get_renderer(self):
+ return network_manager.Renderer()
+
+ def _render_and_read(self, network_config=None, state=None, dir=None):
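+ # Parse the given config (or accept a pre-parsed state), render it
+ # into a temp dir, and return a {path: content} map of the output.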
+ if dir is None:
+ dir = self.tmp_dir()
+
+ if network_config:
+ ns = network_state.parse_net_config_data(network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=dir)
+ return dir2dict(dir)
+
+ def _compare_files_to_expected(self, expected, found):
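+ # Expected dicts are keyed by bare filename; qualify each with
+ # scripts_dir so they align with the paths dir2dict() returns.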
+ orig_maxdiff = self.maxDiff
+ expected_d = dict(
+ (os.path.join(self.scripts_dir, k), v) for k, v in expected.items()
+ )
+
+ try:
+ self.maxDiff = None
+ self.assertEqual(expected_d, found)
+ finally:
+ self.maxDiff = orig_maxdiff
+
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ ):
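+ # Build the fallback (DHCP) config from the mocked sysfs devices and
+ # verify the rendered keyfile.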
+ tmp_dir = self.tmp_dir()
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
+
+ network_cfg = net.generate_fallback_config()
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
+
+ found = dir2dict(render_dir)
+ self._compare_files_to_expected(
+ {
+ "cloud-init-eth1000.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth1000
+ uuid=8c517500-0c95-5308-9c8a-3092eebc44eb
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=07:1C:C6:75:A4:BE
+
+ [ipv4]
+ method=auto
+ may-fail=false
+
+ """
+ ),
+ },
+ found,
+ )
+
+ def test_openstack_rendering_samples(self):
+ for os_sample in OS_SAMPLES:
+ render_dir = self.tmp_dir()
+ ex_input = os_sample["in_data"]
+ ex_mac_addrs = os_sample["in_macs"]
+ network_cfg = openstack.convert_net_json(
+ ex_input, known_macs=ex_mac_addrs
+ )
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
+ renderer = self._get_renderer()
+ # render multiple times to simulate reboots
+ renderer.render_network_state(ns, target=render_dir)
+ renderer.render_network_state(ns, target=render_dir)
+ renderer.render_network_state(ns, target=render_dir)
+ for fn, expected_content in os_sample.get(self.expected_name, []):
+ with open(os.path.join(render_dir, fn)) as fh:
+ self.assertEqual(expected_content, fh.read())
+
+ def test_network_config_v1_samples(self):
+ ns = network_state.parse_net_config_data(CONFIG_V1_SIMPLE_SUBNET)
+ render_dir = self.tmp_path("render")
+ os.makedirs(render_dir)
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
+ found = dir2dict(render_dir)
+ self._compare_files_to_expected(
+ {
+ "cloud-init-interface0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init interface0
+ uuid=8b6862ed-dbd6-5830-93f7-a91451c13828
+ type=ethernet
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+ mac-address=52:54:00:12:34:00
+
+ [ipv4]
+ method=manual
+ may-fail=false
+ address1=10.0.2.15/24
+ gateway=10.0.2.2
+
+ """
+ ),
+ },
+ found,
+ )
+
+ def test_config_with_explicit_loopback(self):
+ render_dir = self.tmp_path("render")
+ os.makedirs(render_dir)
+ ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
+ renderer = self._get_renderer()
+ renderer.render_network_state(ns, target=render_dir)
+ found = dir2dict(render_dir)
+ self._compare_files_to_expected(
+ {
+ "cloud-init-eth0.nmconnection": textwrap.dedent(
+ """\
+ # Generated by cloud-init. Changes will be lost.
+
+ [connection]
+ id=cloud-init eth0
+ uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
+ type=ethernet
+ interface-name=eth0
+
+ [user]
+ org.freedesktop.NetworkManager.origin=cloud-init
+
+ [ethernet]
+
+ [ipv4]
+ method=auto
+ may-fail=false
+
+ """
+ ),
+ },
+ found,
+ )
+
+ def test_bond_config(self):
+ entry = NETWORK_CONFIGS["bond"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_vlan_config(self):
+ entry = NETWORK_CONFIGS["vlan"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_bridge_config(self):
+ entry = NETWORK_CONFIGS["bridge"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_manual_config(self):
+ entry = NETWORK_CONFIGS["manual"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_all_config(self):
+ entry = NETWORK_CONFIGS["all"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self.assertNotIn(
+ "WARNING: Network config: ignoring eth0.101 device-level mtu",
+ self.logs.getvalue(),
+ )
+
+ def test_small_config(self):
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_v4_and_v6_static_config(self):
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ expected_msg = (
+ "WARNING: Network config: ignoring iface0 device-level mtu:8999"
+ " because ipv4 subnet-level mtu:9000 provided."
+ )
+ self.assertIn(expected_msg, self.logs.getvalue())
+
+ def test_dhcpv6_only_config(self):
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_simple_render_ipv6_slaac(self):
+ entry = NETWORK_CONFIGS["ipv6_slaac"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_dhcpv6_stateless_config(self):
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_wakeonlan_disabled_config_v2(self):
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_wakeonlan_enabled_config_v2(self):
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_render_v4_and_v6(self):
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+ def test_render_v6_and_v4(self):
+ entry = NETWORK_CONFIGS["v6_and_v4"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestEniNetRendering(CiTestCase):
@mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.sys_dev_path")
@@ -6164,9 +7353,9 @@ class TestNetworkdRoundTrip(CiTestCase):
class TestRenderersSelect:
@pytest.mark.parametrize(
- "renderer_selected,netplan,eni,nm,scfg,sys,networkd",
+ "renderer_selected,netplan,eni,sys,network_manager,networkd",
(
- # -netplan -ifupdown -nm -scfg -sys raises error
+ # -netplan -ifupdown -sys -network-manager -networkd raises error
(
net.RendererNotFoundError,
False,
@@ -6174,52 +7363,51 @@ class TestRenderersSelect:
False,
False,
False,
- False,
),
- # -netplan +ifupdown -nm -scfg -sys selects eni
- ("eni", False, True, False, False, False, False),
- # +netplan +ifupdown -nm -scfg -sys selects eni
- ("eni", True, True, False, False, False, False),
- # +netplan -ifupdown -nm -scfg -sys selects netplan
- ("netplan", True, False, False, False, False, False),
- # Ubuntu with Network-Manager installed
- # +netplan -ifupdown +nm -scfg -sys selects netplan
- ("netplan", True, False, True, False, False, False),
- # Centos/OpenSuse with Network-Manager installed selects sysconfig
- # -netplan -ifupdown +nm -scfg +sys selects netplan
- ("sysconfig", False, False, True, False, True, False),
- # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd
- ("networkd", False, False, False, False, False, True),
+ # -netplan +ifupdown -sys -nm -networkd selects eni
+ ("eni", False, True, False, False, False),
+ # +netplan +ifupdown -sys -nm -networkd selects eni
+ ("eni", True, True, False, False, False),
+ # +netplan -ifupdown -sys -nm -networkd selects netplan
+ ("netplan", True, False, False, False, False),
+ # -netplan -ifupdown +sys -nm -networkd selects sysconfig
+ ("sysconfig", False, False, True, False, False),
+ # -netplan -ifupdown +sys +nm -networkd selects sysconfig
+ ("sysconfig", False, False, True, True, False),
+ # -netplan -ifupdown -sys +nm -networkd selects nm
+ ("network-manager", False, False, False, True, False),
+ # -netplan -ifupdown -sys +nm +networkd selects nm
+ ("network-manager", False, False, False, True, True),
+ # -netplan -ifupdown -sys -nm +networkd selects networkd
+ ("networkd", False, False, False, False, True),
),
)
@mock.patch("cloudinit.net.renderers.networkd.available")
+ @mock.patch("cloudinit.net.renderers.network_manager.available")
@mock.patch("cloudinit.net.renderers.netplan.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available")
- @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
- @mock.patch("cloudinit.net.renderers.sysconfig.available_nm")
@mock.patch("cloudinit.net.renderers.eni.available")
def test_valid_renderer_from_defaults_depending_on_availability(
self,
m_eni_avail,
- m_nm_avail,
- m_scfg_avail,
m_sys_avail,
m_netplan_avail,
+ m_network_manager_avail,
m_networkd_avail,
renderer_selected,
netplan,
eni,
- nm,
- scfg,
sys,
+ network_manager,
networkd,
):
"""Assert proper renderer per DEFAULT_PRIORITY given availability."""
m_eni_avail.return_value = eni # ifupdown pkg presence
- m_nm_avail.return_value = nm # network-manager presence
- m_scfg_avail.return_value = scfg # sysconfig presence
m_sys_avail.return_value = sys # sysconfig/ifup/down presence
m_netplan_avail.return_value = netplan # netplan presence
+ m_network_manager_avail.return_value = network_manager # NM presence
m_networkd_avail.return_value = networkd # networkd presence
if isinstance(renderer_selected, str):
(renderer_name, _rnd_class) = renderers.select(
@@ -6277,7 +7465,7 @@ class TestNetRenderers(CiTestCase):
priority=["sysconfig", "eni"],
)
- @mock.patch("cloudinit.net.sysconfig.available_sysconfig")
+ @mock.patch("cloudinit.net.sysconfig.available")
@mock.patch("cloudinit.util.system_info")
def test_sysconfig_available_uses_variant_mapping(self, m_info, m_avail):
m_avail.return_value = True
@@ -6368,6 +7556,12 @@ class TestGetInterfaces(CiTestCase):
def _se_interface_has_own_mac(self, name):
return name in self.data["own_macs"]
+ def _se_is_bond(self, name):
+ return name in self.data["bonds"]
+
+ def _se_is_netfailover(self, name):
+ return False
+
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
self.data["devices"] = set(list(self.data["macs"].keys()))
@@ -6379,6 +7573,8 @@ class TestGetInterfaces(CiTestCase):
"is_vlan",
"device_driver",
"device_devid",
+ "is_bond",
+ "is_netfailover",
)
self.mocks = {}
for n in mocks:
@@ -7068,7 +8264,7 @@ class TestRenameInterfaces(CiTestCase):
class TestNetworkState(CiTestCase):
def test_bcast_addr(self):
"""Test mask_and_ipv4_to_bcast_addr proper execution."""
- bcast_addr = network_state.mask_and_ipv4_to_bcast_addr
+ bcast_addr = mask_and_ipv4_to_bcast_addr
self.assertEqual(
"192.168.1.255", bcast_addr("255.255.255.0", "192.168.1.1")
)
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index 3c29e2f7..9eec74c9 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -1,4 +1,5 @@
from collections import namedtuple
+from contextlib import ExitStack
from unittest.mock import patch
import pytest
@@ -41,18 +42,40 @@ NETPLAN_CALL_LIST = [
@pytest.fixture
def available_mocks():
- mocks = namedtuple("Mocks", "m_which, m_file")
- with patch("cloudinit.subp.which", return_value=True) as m_which:
- with patch("os.path.isfile", return_value=True) as m_file:
- yield mocks(m_which, m_file)
+ mocks = namedtuple("Mocks", "m_which, m_file, m_exists")
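+ # uses_systemd is pinned to False so availability checks depend only
+ # on the mocked which/isfile/exists results, not the host init system.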
+ with ExitStack() as mocks_context:
+ mocks_context.enter_context(
+ patch("cloudinit.distros.uses_systemd", return_value=False)
+ )
+ m_which = mocks_context.enter_context(
+ patch("cloudinit.subp.which", return_value=True)
+ )
+ m_file = mocks_context.enter_context(
+ patch("os.path.isfile", return_value=True)
+ )
+ m_exists = mocks_context.enter_context(
+ patch("os.path.exists", return_value=True)
+ )
+ yield mocks(m_which, m_file, m_exists)
@pytest.fixture
def unavailable_mocks():
- mocks = namedtuple("Mocks", "m_which, m_file")
- with patch("cloudinit.subp.which", return_value=False) as m_which:
- with patch("os.path.isfile", return_value=False) as m_file:
- yield mocks(m_which, m_file)
+ mocks = namedtuple("Mocks", "m_which, m_file, m_exists")
+ with ExitStack() as mocks_context:
+ mocks_context.enter_context(
+ patch("cloudinit.distros.uses_systemd", return_value=False)
+ )
+ m_which = mocks_context.enter_context(
+ patch("cloudinit.subp.which", return_value=False)
+ )
+ m_file = mocks_context.enter_context(
+ patch("os.path.isfile", return_value=False)
+ )
+ m_exists = mocks_context.enter_context(
+ patch("os.path.exists", return_value=False)
+ )
+ yield mocks(m_which, m_file, m_exists)
class TestSearchAndSelect:
@@ -113,10 +136,6 @@ NETPLAN_AVAILABLE_CALLS = [
(("netplan",), {"search": ["/usr/sbin", "/sbin"], "target": None}),
]
-NETWORK_MANAGER_AVAILABLE_CALLS = [
- (("nmcli",), {"target": None}),
-]
-
NETWORKD_AVAILABLE_CALLS = [
(("ip",), {"search": ["/usr/sbin", "/bin"], "target": None}),
(("systemctl",), {"search": ["/usr/sbin", "/bin"], "target": None}),
@@ -128,7 +147,6 @@ NETWORKD_AVAILABLE_CALLS = [
[
(IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS),
(NetplanActivator, NETPLAN_AVAILABLE_CALLS),
- (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS),
(NetworkdActivator, NETWORKD_AVAILABLE_CALLS),
],
)
@@ -144,8 +162,72 @@ IF_UP_DOWN_BRING_UP_CALL_LIST = [
]
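+# The NM activator now loads each rendered keyfile and brings the
+# connection up by filename instead of `nmcli connection up ifname <dev>`.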
NETWORK_MANAGER_BRING_UP_CALL_LIST = [
- ((["nmcli", "connection", "up", "ifname", "eth0"],), {}),
- ((["nmcli", "connection", "up", "ifname", "eth1"],), {}),
+ (
+ (
+ [
+ "nmcli",
+ "connection",
+ "load",
+ "".join(
+ [
+ "/etc/NetworkManager/system-connections",
+ "/cloud-init-eth0.nmconnection",
+ ]
+ ),
+ ],
+ ),
+ {},
+ ),
+ (
+ (
+ [
+ "nmcli",
+ "connection",
+ "up",
+ "filename",
+ "".join(
+ [
+ "/etc/NetworkManager/system-connections",
+ "/cloud-init-eth0.nmconnection",
+ ]
+ ),
+ ],
+ ),
+ {},
+ ),
+ (
+ (
+ [
+ "nmcli",
+ "connection",
+ "load",
+ "".join(
+ [
+ "/etc/NetworkManager/system-connections",
+ "/cloud-init-eth1.nmconnection",
+ ]
+ ),
+ ],
+ ),
+ {},
+ ),
+ (
+ (
+ [
+ "nmcli",
+ "connection",
+ "up",
+ "filename",
+ "".join(
+ [
+ "/etc/NetworkManager/system-connections",
+ "/cloud-init-eth1.nmconnection",
+ ]
+ ),
+ ],
+ ),
+ {},
+ ),
]
NETWORKD_BRING_UP_CALL_LIST = [
@@ -169,9 +251,11 @@ class TestActivatorsBringUp:
def test_bring_up_interface(
self, m_subp, activator, expected_call_list, available_mocks
):
activator.bring_up_interface("eth0")
- assert len(m_subp.call_args_list) == 1
- assert m_subp.call_args_list[0] == expected_call_list[0]
+ for index, call in enumerate(m_subp.call_args_list):
+ assert call == expected_call_list[index]
@patch("cloudinit.subp.subp", return_value=("", ""))
def test_bring_up_interfaces(
@@ -208,8 +292,8 @@ IF_UP_DOWN_BRING_DOWN_CALL_LIST = [
]
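+# Bring-down switched from `nmcli connection down <name>` to
+# `nmcli device disconnect <device>`.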
NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
- ((["nmcli", "connection", "down", "eth0"],), {}),
- ((["nmcli", "connection", "down", "eth1"],), {}),
+ ((["nmcli", "device", "disconnect", "eth0"],), {}),
+ ((["nmcli", "device", "disconnect", "eth1"],), {}),
]
NETWORKD_BRING_DOWN_CALL_LIST = [
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 3facb2bb..1288c259 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -60,9 +60,11 @@ class TestFreeBSDRoundTrip(CiTestCase):
renderer.render_network_state(ns, target=target)
return dir2dict(target)
- @mock.patch("cloudinit.subp.subp")
- def test_render_output_has_yaml(self, mock_subp):
-
+ @mock.patch(
+ "cloudinit.subp.subp", return_value=(SAMPLE_FREEBSD_IFCONFIG_OUT, 0)
+ )
+ @mock.patch("cloudinit.util.is_FreeBSD", return_value=True)
+ def test_render_output_has_yaml(self, m_is_freebsd, m_subp):
entry = {
"yaml": V1,
}
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 30fbd1a4..9f95d448 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -68,6 +68,7 @@ class TestRenderCloudCfg:
default_user_exceptions = {
"amazon": "ec2-user",
"debian": "ubuntu",
+ "rhel": "cloud-user",
"unknown": "ubuntu",
}
default_user = system_cfg["system_info"]["default_user"]["name"]
diff --git a/tests/unittests/test_safeyaml.py b/tests/unittests/test_safeyaml.py
new file mode 100644
index 00000000..5be09b21
--- /dev/null
+++ b/tests/unittests/test_safeyaml.py
@@ -0,0 +1,60 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.safeyaml."""
+
+import pytest
+
+from cloudinit.safeyaml import load_with_marks
+
+
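+# load_with_marks returns the parsed YAML plus a mapping of dotted key
+# paths (list items as "a.0") to the 1-based source line of each entry.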
+class TestLoadWithMarks:
+ @pytest.mark.parametrize(
+ "source_yaml,loaded_yaml,schemamarks",
+ (
+ # Invalid cloud-config, non-dict types don't cause an error
+ (b"scalar", "scalar", {}),
+ # Multiple keys account for comments and whitespace lines
+ (
+ b"#\na: va\n \nb: vb\n#\nc: vc",
+ {"a": "va", "b": "vb", "c": "vc"},
+ {"a": 2, "b": 4, "c": 6},
+ ),
+ # List items represented on correct line number
+ (
+ b"a:\n - a1\n\n - a2\n",
+ {"a": ["a1", "a2"]},
+ {"a": 1, "a.0": 2, "a.1": 4},
+ ),
+ # Nested dicts represented on correct line number
+ (
+ b"a:\n a1:\n\n aa1: aa1v\n",
+ {"a": {"a1": {"aa1": "aa1v"}}},
+ {"a": 1, "a.a1": 2, "a.a1.aa1": 4},
+ ),
+ (b"[list, of, scalar]", ["list", "of", "scalar"], {}),
+ (
+ b"{a: [a1, a2], b: [b3]}",
+ {"a": ["a1", "a2"], "b": ["b3"]},
+ {"a": 1, "a.0": 1, "a.1": 1, "b": 1},
+ ),
+ (
+ b"a: [a1, a2]\nb: [b3]",
+ {"a": ["a1", "a2"], "b": ["b3"]},
+ {"a": 1, "a.0": 1, "a.1": 1, "b": 2, "b.0": 2},
+ ),
+ (
+ b"a:\n- a1\n- a2\nb: [b3]",
+ {"a": ["a1", "a2"], "b": ["b3"]},
+ {"a": 1, "a.0": 2, "a.1": 3, "b": 4, "b.0": 4},
+ ),
+ (
+ b"a:\n- a1\n- a2\nb:\n- b3",
+ {"a": ["a1", "a2"], "b": ["b3"]},
+ {"a": 1, "a.0": 2, "a.1": 3, "b": 4, "b.0": 5},
+ ),
+ ),
+ )
+ def test_schema_marks_preserved(
+ self, source_yaml, loaded_yaml, schemamarks
+ ):
+ assert (loaded_yaml, schemamarks) == load_with_marks(source_yaml)
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index d614350e..3328b8f4 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -6,6 +6,7 @@ from functools import partial
from unittest.mock import patch
from cloudinit import ssh_util, util
+from cloudinit.temp_utils import mkdtemp
from tests.unittests import helpers as test_helpers
# https://stackoverflow.com/questions/11351032/
@@ -691,6 +692,8 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
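+ # Class-wide scratch directory; replaces the hard-coded /tmp paths
+ # these tests previously wrote to.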
+ tmp_d = mkdtemp()
+
def create_fake_users(
self,
names,
@@ -703,12 +706,12 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
):
homes = []
- root = "/tmp/root"
+ root = self.tmp_d + "/root"
fpw = FakePwEnt(pw_name="root", pw_dir=root)
users["root"] = fpw
for name in names:
- home = "/tmp/home/" + name
+ home = self.tmp_d + "/home/" + name
fpw = FakePwEnt(pw_name=name, pw_dir=home)
users[name] = fpw
homes.append(home)
@@ -730,13 +733,13 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
return authorized_keys
def create_global_authorized_file(self, filename, content_key, keys):
- authorized_keys = self.tmp_path(filename, dir="/tmp")
+ authorized_keys = self.tmp_path(filename, dir=self.tmp_d)
util.write_file(authorized_keys, VALID_CONTENT[content_key])
keys[authorized_keys] = content_key
return authorized_keys
def create_sshd_config(self, authorized_keys_files):
- sshd_config = self.tmp_path("sshd_config", dir="/tmp")
+ sshd_config = self.tmp_path("sshd_config", dir=self.tmp_d)
util.write_file(
sshd_config, "AuthorizedKeysFile " + authorized_keys_files
)
@@ -757,8 +760,8 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
else:
self.assertFalse(VALID_CONTENT[key] in content)
- if delete_keys and os.path.isdir("/tmp/home/"):
- util.delete_dir_contents("/tmp/home/")
+ if delete_keys and os.path.isdir(self.tmp_d + "/home/"):
+ util.delete_dir_contents(self.tmp_d + "/home/")
@patch("cloudinit.ssh_util.pwd.getpwnam")
@patch("cloudinit.util.get_permissions")
@@ -771,10 +774,12 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
}
homes = self.create_fake_users(
@@ -815,10 +820,12 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
}
homes = self.create_fake_users(
@@ -859,10 +866,12 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
}
homes = self.create_fake_users(
@@ -910,10 +919,12 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
- "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
}
homes = self.create_fake_users(
@@ -961,9 +972,10 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
}
homes = self.create_fake_users(
@@ -998,12 +1010,14 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
- "/tmp/home/suzie": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/suzie": ("suzie", "suzie", 0o700),
+ self.tmp_d + "/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ self.tmp_d
+ + "/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
}
user_bobby = "bobby"
@@ -1048,12 +1062,14 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
- "/tmp/home/suzie": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/suzie": ("suzie", "suzie", 0o700),
+ self.tmp_d + "/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ self.tmp_d
+ + "/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600),
}
user_bobby = "bobby"
@@ -1098,14 +1114,18 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
- "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
- "/tmp/home/suzie": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600),
- "/tmp/home/suzie/.ssh/user_keys3": ("suzie", "suzie", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ self.tmp_d
+ + "/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/suzie": ("suzie", "suzie", 0o700),
+ self.tmp_d + "/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ self.tmp_d
+ + "/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600),
+ self.tmp_d
+ + "/home/suzie/.ssh/user_keys3": ("suzie", "suzie", 0o600),
}
user_bobby = "bobby"
@@ -1168,13 +1188,15 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
- "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
- "/tmp/home/badguy": ("root", "root", 0o755),
- "/tmp/home/badguy/home": ("root", "root", 0o755),
- "/tmp/home/badguy/home/bobby": ("root", "root", 0o655),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ self.tmp_d
+ + "/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/badguy": ("root", "root", 0o755),
+ self.tmp_d + "/home/badguy/home": ("root", "root", 0o755),
+ self.tmp_d + "/home/badguy/home/bobby": ("root", "root", 0o655),
}
user_bobby = "bobby"
@@ -1200,7 +1222,9 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
)
# /tmp/home/badguy/home/bobby = ""
- authorized_keys2 = self.tmp_path("home/bobby", dir="/tmp/home/badguy")
+ authorized_keys2 = self.tmp_path(
+ "home/bobby", dir=self.tmp_d + "/home/badguy"
+ )
util.write_file(authorized_keys2, "")
# /tmp/etc/ssh/authorized_keys = ecdsa
@@ -1239,17 +1263,20 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
- "/tmp/etc": ("root", "root", 0o755),
- "/tmp/etc/ssh": ("root", "root", 0o755),
- "/tmp/etc/ssh/userkeys": ("root", "root", 0o700),
- "/tmp/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600),
- "/tmp/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600),
- "/tmp/home/badguy": ("badguy", "badguy", 0o700),
- "/tmp/home/badguy/.ssh": ("badguy", "badguy", 0o700),
- "/tmp/home/badguy/.ssh/authorized_keys": (
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/etc": ("root", "root", 0o755),
+ self.tmp_d + "/etc/ssh": ("root", "root", 0o755),
+ self.tmp_d + "/etc/ssh/userkeys": ("root", "root", 0o700),
+ self.tmp_d + "/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600),
+ self.tmp_d
+ + "/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600),
+ self.tmp_d + "/home/badguy": ("badguy", "badguy", 0o700),
+ self.tmp_d + "/home/badguy/.ssh": ("badguy", "badguy", 0o700),
+ self.tmp_d
+ + "/home/badguy/.ssh/authorized_keys": (
"badguy",
"badguy",
0o600,
@@ -1292,7 +1319,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
)
# /tmp/sshd_config
- options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ options = self.tmp_d + "/etc/ssh/userkeys/%u .ssh/authorized_keys"
sshd_config = self.create_sshd_config(options)
self.execute_and_check(
@@ -1318,17 +1345,20 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
- "/tmp/etc": ("root", "root", 0o755),
- "/tmp/etc/ssh": ("root", "root", 0o755),
- "/tmp/etc/ssh/userkeys": ("root", "root", 0o755),
- "/tmp/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600),
- "/tmp/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600),
- "/tmp/home/badguy": ("badguy", "badguy", 0o700),
- "/tmp/home/badguy/.ssh": ("badguy", "badguy", 0o700),
- "/tmp/home/badguy/.ssh/authorized_keys": (
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/etc": ("root", "root", 0o755),
+ self.tmp_d + "/etc/ssh": ("root", "root", 0o755),
+ self.tmp_d + "/etc/ssh/userkeys": ("root", "root", 0o755),
+ self.tmp_d + "/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600),
+ self.tmp_d
+ + "/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600),
+ self.tmp_d + "/home/badguy": ("badguy", "badguy", 0o700),
+ self.tmp_d + "/home/badguy/.ssh": ("badguy", "badguy", 0o700),
+ self.tmp_d
+ + "/home/badguy/.ssh/authorized_keys": (
"badguy",
"badguy",
0o600,
@@ -1371,7 +1401,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
)
# /tmp/sshd_config
- options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ options = self.tmp_d + "/etc/ssh/userkeys/%u .ssh/authorized_keys"
sshd_config = self.create_sshd_config(options)
self.execute_and_check(
@@ -1397,12 +1427,14 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
- "/tmp/home/suzie": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/suzie": ("suzie", "suzie", 0o700),
+ self.tmp_d + "/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ self.tmp_d
+ + "/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
}
user_bobby = "bobby"
@@ -1456,12 +1488,14 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
- "/tmp/home/suzie": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/suzie": ("suzie", "suzie", 0o700),
+ self.tmp_d + "/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ self.tmp_d
+ + "/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
}
user_bobby = "bobby"
@@ -1515,12 +1549,14 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
keys = {}
users = {}
mock_permissions = {
- "/tmp/home/bobby": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
- "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
- "/tmp/home/suzie": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
- "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ self.tmp_d + "/home/bobby": ("bobby", "bobby", 0o700),
+ self.tmp_d + "/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ self.tmp_d
+ + "/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ self.tmp_d + "/home/suzie": ("suzie", "suzie", 0o700),
+ self.tmp_d + "/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ self.tmp_d
+ + "/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
}
user_bobby = "bobby"
diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py
index 3214410b..9fa2e629 100644
--- a/tests/unittests/test_stages.py
+++ b/tests/unittests/test_stages.py
@@ -10,9 +10,10 @@ from cloudinit import sources, stages
from cloudinit.event import EventScope, EventType
from cloudinit.sources import NetworkConfigSource
from cloudinit.util import write_file
-from tests.unittests.helpers import CiTestCase, mock
+from tests.unittests.helpers import mock
TEST_INSTANCE_ID = "i-testing"
+M_PATH = "cloudinit.stages."
class FakeDataSource(sources.DataSource):
@@ -35,15 +36,11 @@ class FakeDataSource(sources.DataSource):
return True
-class TestInit(CiTestCase):
- with_logs = True
- allowed_subp = False
-
- def setUp(self):
- super(TestInit, self).setUp()
- self.tmpdir = self.tmp_dir()
+class TestInit:
+ @pytest.fixture(autouse=True)
+ def setup(self, tmpdir):
+ self.tmpdir = tmpdir
self.init = stages.Init()
- # Setup fake Paths for Init to reference
self.init._cfg = {
"system_info": {
"distro": "ubuntu",
@@ -60,47 +57,63 @@ class TestInit(CiTestCase):
self.init.paths.get_cpath("data"), "upgraded-network"
)
write_file(disable_file, "")
- self.assertEqual(
- (None, disable_file), self.init._find_networking_config()
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (None, disable_file) == self.init._find_networking_config()
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "net_config",
+ [
+ {"config": "disabled"},
+ {"network": {"config": "disabled"}},
+ ],
+ )
def test_wb__find_networking_config_disabled_by_kernel(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, net_config, caplog
):
"""find_networking_config returns when disabled by kernel cmdline."""
- m_cmdline.return_value = {"config": "disabled"}
+ m_cmdline.return_value = net_config
m_initramfs.return_value = {"config": ["fake_initrd"]}
- self.assertEqual(
- (None, NetworkConfigSource.cmdline),
- self.init._find_networking_config(),
- )
- self.assertEqual(
- "DEBUG: network config disabled by cmdline\n", self.logs.getvalue()
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (
+ None,
+ NetworkConfigSource.CMD_LINE,
+ ) == self.init._find_networking_config()
+ assert caplog.records[0].levelname == "DEBUG"
+ assert "network config disabled by cmdline" in caplog.text
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "net_config",
+ [
+ {"config": "disabled"},
+ {"network": {"config": "disabled"}},
+ ],
+ )
def test_wb__find_networking_config_disabled_by_initrd(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, net_config, caplog
):
"""find_networking_config returns when disabled by kernel cmdline."""
m_cmdline.return_value = {}
- m_initramfs.return_value = {"config": "disabled"}
- self.assertEqual(
- (None, NetworkConfigSource.initramfs),
- self.init._find_networking_config(),
- )
- self.assertEqual(
- "DEBUG: network config disabled by initramfs\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ m_initramfs.return_value = net_config
+ assert (
+ None,
+ NetworkConfigSource.INITRAMFS,
+ ) == self.init._find_networking_config()
+ assert caplog.records[0].levelname == "DEBUG"
+ assert "network config disabled by initramfs" in caplog.text
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "net_config",
+ [
+ {"config": "disabled"},
+ {"network": {"config": "disabled"}},
+ ],
+ )
def test_wb__find_networking_config_disabled_by_datasrc(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, net_config, caplog
):
"""find_networking_config returns when disabled by datasource cfg."""
m_cmdline.return_value = {} # Kernel doesn't disable networking
@@ -110,41 +123,51 @@ class TestInit(CiTestCase):
"network": {},
} # system config doesn't disable
- self.init.datasource = FakeDataSource(
- network_config={"config": "disabled"}
- )
- self.assertEqual(
- (None, NetworkConfigSource.ds), self.init._find_networking_config()
- )
- self.assertEqual(
- "DEBUG: network config disabled by ds\n", self.logs.getvalue()
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ self.init.datasource = FakeDataSource(network_config=net_config)
+ assert (
+ None,
+ NetworkConfigSource.DS,
+ ) == self.init._find_networking_config()
+ assert caplog.records[0].levelname == "DEBUG"
+ assert "network config disabled by ds" in caplog.text
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "net_config",
+ [
+ {"config": "disabled"},
+ {"network": {"config": "disabled"}},
+ ],
+ )
def test_wb__find_networking_config_disabled_by_sysconfig(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, net_config, caplog
):
"""find_networking_config returns when disabled by system config."""
m_cmdline.return_value = {} # Kernel doesn't disable networking
m_initramfs.return_value = {} # initramfs doesn't disable networking
self.init._cfg = {
"system_info": {"paths": {"cloud_dir": self.tmpdir}},
- "network": {"config": "disabled"},
+ "network": net_config,
}
- self.assertEqual(
- (None, NetworkConfigSource.system_cfg),
- self.init._find_networking_config(),
- )
- self.assertEqual(
- "DEBUG: network config disabled by system_cfg\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (
+ None,
+ NetworkConfigSource.SYSTEM_CFG,
+ ) == self.init._find_networking_config()
+ assert caplog.records[0].levelname == "DEBUG"
+ assert "network config disabled by system_cfg" in caplog.text
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "in_config,out_config",
+ [
+ ({"config": {"a": True}}, {"config": {"a": True}}),
+ ({"network": {"config": {"a": True}}}, {"config": {"a": True}}),
+ ],
+ )
def test__find_networking_config_uses_datasrc_order(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, in_config, out_config
):
"""find_networking_config should check sources in DS defined order"""
# cmdline and initramfs, which would normally be preferred over other
@@ -153,74 +176,91 @@ class TestInit(CiTestCase):
m_cmdline.return_value = {"config": "disabled"}
m_initramfs.return_value = {"config": "disabled"}
- ds_net_cfg = {"config": {"needle": True}}
- self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource = FakeDataSource(network_config=in_config)
self.init.datasource.network_config_sources = [
- NetworkConfigSource.ds,
- NetworkConfigSource.system_cfg,
- NetworkConfigSource.cmdline,
- NetworkConfigSource.initramfs,
+ NetworkConfigSource.DS,
+ NetworkConfigSource.SYSTEM_CFG,
+ NetworkConfigSource.CMD_LINE,
+ NetworkConfigSource.INITRAMFS,
]
- self.assertEqual(
- (ds_net_cfg, NetworkConfigSource.ds),
- self.init._find_networking_config(),
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (
+ out_config,
+ NetworkConfigSource.DS,
+ ) == self.init._find_networking_config()
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "in_config,out_config",
+ [
+ ({"config": {"a": True}}, {"config": {"a": True}}),
+ ({"network": {"config": {"a": True}}}, {"config": {"a": True}}),
+ ],
+ )
def test__find_networking_config_warns_if_datasrc_uses_invalid_src(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, in_config, out_config, caplog
):
"""find_networking_config should check sources in DS defined order"""
- ds_net_cfg = {"config": {"needle": True}}
- self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource = FakeDataSource(network_config=in_config)
self.init.datasource.network_config_sources = [
"invalid_src",
- NetworkConfigSource.ds,
+ NetworkConfigSource.DS,
]
- self.assertEqual(
- (ds_net_cfg, NetworkConfigSource.ds),
- self.init._find_networking_config(),
- )
- self.assertIn(
- "WARNING: data source specifies an invalid network"
- " cfg_source: invalid_src",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (
+ out_config,
+ NetworkConfigSource.DS,
+ ) == self.init._find_networking_config()
+ assert caplog.records[0].levelname == "WARNING"
+ assert (
+ "data source specifies an invalid network cfg_source: invalid_src"
+ in caplog.text
+ )
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "in_config,out_config",
+ [
+ ({"config": {"a": True}}, {"config": {"a": True}}),
+ ({"network": {"config": {"a": True}}}, {"config": {"a": True}}),
+ ],
+ )
def test__find_networking_config_warns_if_datasrc_uses_unavailable_src(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, in_config, out_config, caplog
):
"""find_networking_config should check sources in DS defined order"""
- ds_net_cfg = {"config": {"needle": True}}
- self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource = FakeDataSource(network_config=in_config)
self.init.datasource.network_config_sources = [
- NetworkConfigSource.fallback,
- NetworkConfigSource.ds,
+ NetworkConfigSource.FALLBACK,
+ NetworkConfigSource.DS,
]
- self.assertEqual(
- (ds_net_cfg, NetworkConfigSource.ds),
- self.init._find_networking_config(),
- )
- self.assertIn(
- "WARNING: data source specifies an unavailable network"
- " cfg_source: fallback",
- self.logs.getvalue(),
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (
+ out_config,
+ NetworkConfigSource.DS,
+ ) == self.init._find_networking_config()
+ assert caplog.records[0].levelname == "WARNING"
+ assert (
+ "data source specifies an unavailable network cfg_source: fallback"
+ in caplog.text
+ )
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "in_config,out_config",
+ [
+ ({"config": {"a": True}}, {"config": {"a": True}}),
+ ({"network": {"config": {"a": True}}}, {"config": {"a": True}}),
+ ],
+ )
def test_wb__find_networking_config_returns_kernel(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, in_config, out_config
):
"""find_networking_config returns kernel cmdline config if present."""
- expected_cfg = {"config": ["fakekernel"]}
- m_cmdline.return_value = expected_cfg
+ m_cmdline.return_value = in_config
m_initramfs.return_value = {"config": ["fake_initrd"]}
self.init._cfg = {
"system_info": {"paths": {"cloud_dir": self.tmpdir}},
@@ -229,20 +269,26 @@ class TestInit(CiTestCase):
self.init.datasource = FakeDataSource(
network_config={"config": ["fakedatasource"]}
)
- self.assertEqual(
- (expected_cfg, NetworkConfigSource.cmdline),
- self.init._find_networking_config(),
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (
+ out_config,
+ NetworkConfigSource.CMD_LINE,
+ ) == self.init._find_networking_config()
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "in_config,out_config",
+ [
+ ({"config": {"a": True}}, {"config": {"a": True}}),
+ ({"network": {"config": {"a": True}}}, {"config": {"a": True}}),
+ ],
+ )
def test_wb__find_networking_config_returns_initramfs(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, in_config, out_config
):
- """find_networking_config returns kernel cmdline config if present."""
- expected_cfg = {"config": ["fake_initrd"]}
+ """find_networking_config returns initramfs config if present."""
m_cmdline.return_value = {}
- m_initramfs.return_value = expected_cfg
+ m_initramfs.return_value = in_config
self.init._cfg = {
"system_info": {"paths": {"cloud_dir": self.tmpdir}},
"network": {"config": ["fakesys_config"]},
@@ -250,52 +296,63 @@ class TestInit(CiTestCase):
self.init.datasource = FakeDataSource(
network_config={"config": ["fakedatasource"]}
)
- self.assertEqual(
- (expected_cfg, NetworkConfigSource.initramfs),
- self.init._find_networking_config(),
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (
+ out_config,
+ NetworkConfigSource.INITRAMFS,
+ ) == self.init._find_networking_config()
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "in_config,out_config",
+ [
+ ({"config": {"a": True}}, {"config": {"a": True}}),
+ ({"network": {"config": {"a": True}}}, {"config": {"a": True}}),
+ ],
+ )
def test_wb__find_networking_config_returns_system_cfg(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, in_config, out_config
):
"""find_networking_config returns system config when present."""
m_cmdline.return_value = {} # No kernel network config
m_initramfs.return_value = {} # no initramfs network config
- expected_cfg = {"config": ["fakesys_config"]}
self.init._cfg = {
"system_info": {"paths": {"cloud_dir": self.tmpdir}},
- "network": expected_cfg,
+ "network": in_config,
}
self.init.datasource = FakeDataSource(
network_config={"config": ["fakedatasource"]}
)
- self.assertEqual(
- (expected_cfg, NetworkConfigSource.system_cfg),
- self.init._find_networking_config(),
- )
-
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ assert (
+ out_config,
+ NetworkConfigSource.SYSTEM_CFG,
+ ) == self.init._find_networking_config()
+
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
+ @pytest.mark.parametrize(
+ "in_config,out_config",
+ [
+ ({"config": {"a": True}}, {"config": {"a": True}}),
+ ({"network": {"config": {"a": True}}}, {"config": {"a": True}}),
+ ],
+ )
def test_wb__find_networking_config_returns_datasrc_cfg(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, in_config, out_config
):
"""find_networking_config returns datasource net config if present."""
m_cmdline.return_value = {} # No kernel network config
m_initramfs.return_value = {} # no initramfs network config
- # No system config for network in setUp
- expected_cfg = {"config": ["fakedatasource"]}
- self.init.datasource = FakeDataSource(network_config=expected_cfg)
- self.assertEqual(
- (expected_cfg, NetworkConfigSource.ds),
- self.init._find_networking_config(),
- )
+ self.init.datasource = FakeDataSource(network_config=in_config)
+ assert (
+ out_config,
+ NetworkConfigSource.DS,
+ ) == self.init._find_networking_config()
- @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
- @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config")
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config")
def test_wb__find_networking_config_returns_fallback(
- self, m_cmdline, m_initramfs
+ self, m_cmdline, m_initramfs, caplog
):
"""find_networking_config returns fallback config if not defined."""
m_cmdline.return_value = {} # Kernel doesn't disable networking
@@ -313,13 +370,13 @@ class TestInit(CiTestCase):
# Monkey patch distro which gets cached on self.init
distro = self.init.distro
distro.generate_fallback_config = fake_generate_fallback
- self.assertEqual(
- (fake_cfg, NetworkConfigSource.fallback),
- self.init._find_networking_config(),
- )
- self.assertNotIn("network config disabled", self.logs.getvalue())
+ assert (
+ fake_cfg,
+ NetworkConfigSource.FALLBACK,
+ ) == self.init._find_networking_config()
+ assert "network config disabled" not in caplog.text
- def test_apply_network_config_disabled(self):
+ def test_apply_network_config_disabled(self, caplog):
"""Log when network is disabled by upgraded-network."""
disable_file = os.path.join(
self.init.paths.get_cpath("data"), "upgraded-network"
@@ -331,10 +388,8 @@ class TestInit(CiTestCase):
self.init._find_networking_config = fake_network_config
self.init.apply_network_config(True)
- self.assertIn(
- "INFO: network config is disabled by %s" % disable_file,
- self.logs.getvalue(),
- )
+ assert caplog.records[0].levelname == "INFO"
+ assert f"network config is disabled by {disable_file}" in caplog.text
@mock.patch("cloudinit.net.get_interfaces_by_mac")
@mock.patch("cloudinit.distros.ubuntu.Distro")
@@ -353,21 +408,23 @@ class TestInit(CiTestCase):
}
def fake_network_config():
- return net_cfg, NetworkConfigSource.fallback
+ return net_cfg, NetworkConfigSource.FALLBACK
m_macs.return_value = {"42:42:42:42:42:42": "eth9"}
self.init._find_networking_config = fake_network_config
self.init.apply_network_config(True)
- self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ networking = self.init.distro.networking
+ networking.apply_network_config_names.assert_called_with(net_cfg)
self.init.distro.apply_network_config.assert_called_with(
net_cfg, bring_up=True
)
@mock.patch("cloudinit.distros.ubuntu.Distro")
- def test_apply_network_on_same_instance_id(self, m_ubuntu):
- """Only call distro.apply_network_config_names on same instance id."""
+ def test_apply_network_on_same_instance_id(self, m_ubuntu, caplog):
+ """Only call distro.networking.apply_network_config_names on same
+ instance id."""
self.init.is_new_instance = self._real_is_new_instance
old_instance_id = os.path.join(
self.init.paths.get_cpath("data"), "instance-id"
@@ -386,21 +443,19 @@ class TestInit(CiTestCase):
}
def fake_network_config():
- return net_cfg, NetworkConfigSource.fallback
+ return net_cfg, NetworkConfigSource.FALLBACK
self.init._find_networking_config = fake_network_config
self.init.apply_network_config(True)
- self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ networking = self.init.distro.networking
+ networking.apply_network_config_names.assert_called_with(net_cfg)
self.init.distro.apply_network_config.assert_not_called()
assert (
"No network config applied. Neither a new instance nor datasource "
- "network update allowed" in self.logs.getvalue()
+ "network update allowed" in caplog.text
)
- # CiTestCase doesn't work with pytest.mark.parametrize, and moving this
- # functionality to a separate class is more cumbersome than it'd be worth
- # at the moment, so use this as a simple setup
def _apply_network_setup(self, m_macs):
old_instance_id = os.path.join(
self.init.paths.get_cpath("data"), "instance-id"
@@ -419,7 +474,7 @@ class TestInit(CiTestCase):
}
def fake_network_config():
- return net_cfg, NetworkConfigSource.fallback
+ return net_cfg, NetworkConfigSource.FALLBACK
m_macs.return_value = {"42:42:42:42:42:42": "eth9"}
@@ -439,9 +494,10 @@ class TestInit(CiTestCase):
net_cfg = self._apply_network_setup(m_macs)
self.init.apply_network_config(True)
+ networking = self.init.distro.networking
assert (
mock.call(net_cfg)
- == self.init.distro.apply_network_config_names.call_args_list[-1]
+ == networking.apply_network_config_names.call_args_list[-1]
)
assert (
mock.call(net_cfg, bring_up=True)
@@ -455,7 +511,7 @@ class TestInit(CiTestCase):
{EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
)
def test_apply_network_disabled_when_no_default_boot(
- self, m_ubuntu, m_macs
+ self, m_ubuntu, m_macs, caplog
):
"""Don't apply network if datasource has no BOOT event."""
self._apply_network_setup(m_macs)
@@ -463,7 +519,7 @@ class TestInit(CiTestCase):
self.init.distro.apply_network_config.assert_not_called()
assert (
"No network config applied. Neither a new instance nor datasource "
- "network update allowed" in self.logs.getvalue()
+ "network update allowed" in caplog.text
)
@mock.patch("cloudinit.net.get_interfaces_by_mac")
@@ -479,7 +535,8 @@ class TestInit(CiTestCase):
net_cfg = self._apply_network_setup(m_macs)
self.init._cfg = {"updates": {"network": {"when": ["boot"]}}}
self.init.apply_network_config(True)
- self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ networking = self.init.distro.networking
+ networking.apply_network_config_names.assert_called_with(net_cfg)
self.init.distro.apply_network_config.assert_called_with(
net_cfg, bring_up=True
)
@@ -490,7 +547,9 @@ class TestInit(CiTestCase):
sources.DataSource.supported_update_events,
{EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
)
- def test_apply_network_disabled_when_unsupported(self, m_ubuntu, m_macs):
+ def test_apply_network_disabled_when_unsupported(
+ self, m_ubuntu, m_macs, caplog
+ ):
"""Don't apply network config if unsupported.
Shouldn't work even when specified as userdata
@@ -502,7 +561,7 @@ class TestInit(CiTestCase):
self.init.distro.apply_network_config.assert_not_called()
assert (
"No network config applied. Neither a new instance nor datasource "
- "network update allowed" in self.logs.getvalue()
+ "network update allowed" in caplog.text
)
@@ -519,13 +578,13 @@ class TestInit_InitializeFilesystem:
As it is replaced with a mock, consumers of this fixture can set
`init._cfg` if the default empty dict configuration is not appropriate.
"""
- with mock.patch("cloudinit.stages.util.ensure_dirs"):
+ with mock.patch(M_PATH + "util.ensure_dirs"):
init = stages.Init()
init._cfg = {}
init._paths = paths
yield init
- @mock.patch("cloudinit.stages.util.ensure_file")
+ @mock.patch(M_PATH + "util.ensure_file")
def test_ensure_file_not_called_if_no_log_file_configured(
self, m_ensure_file, init
):
@@ -563,6 +622,3 @@ class TestInit_InitializeFilesystem:
init._initialize_filesystem()
assert mode == stat.S_IMODE(log_file.stat().mode)
-
-
-# vi: ts=4 expandtab
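
The hunks above pin down the precedence contract of
Init._find_networking_config: sources are consulted in the order the
datasource declares, invalid or unavailable sources are skipped with a
warning, and the config may arrive either as {"config": ...} or nested
under a "network" key. A minimal standalone sketch of that lookup
pattern, using hypothetical names (the real logic lives in
cloudinit/stages.py):

    from enum import Enum

    class Source(Enum):
        CMD_LINE = "cmdline"
        INITRAMFS = "initramfs"
        SYSTEM_CFG = "system_cfg"
        DS = "ds"
        FALLBACK = "fallback"

    def find_networking_config(readers, order):
        """Return (config, source) from the first source yielding config.

        readers maps Source -> zero-arg callable returning a raw config
        dict, possibly nested under a "network" key.
        """
        for src in order:
            if src not in readers:
                print("WARNING: invalid network cfg_source: %s" % src)
                continue
            raw = readers[src]() or {}
            cfg = raw.get("network", raw)  # unwrap {"network": {...}}
            if cfg.get("config") == "disabled":
                return None, src  # "disabled" is a definitive answer
            if cfg:
                return cfg, src
        return None, Source.FALLBACK
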
diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py
index e91f389b..bbdb3bd1 100644
--- a/tests/unittests/test_temp_utils.py
+++ b/tests/unittests/test_temp_utils.py
@@ -3,12 +3,15 @@
"""Tests for cloudinit.temp_utils"""
import os
+from tempfile import gettempdir
from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir
from tests.unittests.helpers import CiTestCase, wrap_and_call
class TestTempUtils(CiTestCase):
+ prefix = gettempdir()
+
def test_mkdtemp_default_non_root(self):
"""mkdtemp creates a dir under /tmp for the unprivileged."""
calls = []
@@ -28,7 +31,7 @@ class TestTempUtils(CiTestCase):
mkdtemp,
)
self.assertEqual("/fake/return/path", retval)
- self.assertEqual([{"dir": "/tmp"}], calls)
+ self.assertEqual([{"dir": self.prefix}], calls)
def test_mkdtemp_default_non_root_needs_exe(self):
"""mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe."""
@@ -92,7 +95,7 @@ class TestTempUtils(CiTestCase):
mkstemp,
)
self.assertEqual("/fake/return/path", retval)
- self.assertEqual([{"dir": "/tmp"}], calls)
+ self.assertEqual([{"dir": self.prefix}], calls)
def test_mkstemp_default_root(self):
"""mkstemp creates a secure tempfile in /run/cloud-init for root."""
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index c1fec27c..0cdedbe7 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -10,14 +10,6 @@ from cloudinit import templater
from cloudinit.util import load_file, write_file
from tests.unittests import helpers as test_helpers
-try:
- import Cheetah
-
- HAS_CHEETAH = True
- c = Cheetah # make pyflakes and pylint happy, as Cheetah is not used here
-except ImportError:
- HAS_CHEETAH = False
-
class TestTemplates(test_helpers.CiTestCase):
@@ -52,28 +44,6 @@ class TestTemplates(test_helpers.CiTestCase):
out_data = templater.basic_render(in_data, {"b": 2})
self.assertEqual(expected_data.strip(), out_data)
- @test_helpers.skipIf(not HAS_CHEETAH, "cheetah renderer not available")
- def test_detection(self):
- blob = "## template:cheetah"
-
- (template_type, _renderer, contents) = templater.detect_template(blob)
- self.assertIn("cheetah", template_type)
- self.assertEqual("", contents.strip())
-
- blob = "blahblah $blah"
- (template_type, _renderer, _contents) = templater.detect_template(blob)
- self.assertIn("cheetah", template_type)
- self.assertEqual(blob, contents)
-
- blob = "##template:something-new"
- self.assertRaises(ValueError, templater.detect_template, blob)
-
- def test_render_cheetah(self):
- blob = """## template:cheetah
-$a,$b"""
- c = templater.render_string(blob, {"a": 1, "b": 2})
- self.assertEqual("1,2", c)
-
def test_render_jinja(self):
blob = """## template:jinja
{{a}},{{b}}"""
@@ -184,5 +154,19 @@ $a,$b"""
self.logs.getvalue(),
)
+ def test_jinja_do_extension_render_to_string(self):
+ """Test jinja render_to_string using do extension."""
+ expected_result = "[1, 2, 3]"
+ jinja_template = (
+ "{% set r = [] %} {% set input = [1,2,3] %} "
+ "{% for i in input %} {% do r.append(i) %} {% endfor %} {{r}}"
+ )
+ self.assertEqual(
+ templater.render_string(
+ self.add_header("jinja", jinja_template), {}
+ ).strip(),
+ expected_result,
+ )
+
# vi: ts=4 expandtab
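
The do-extension test added above depends on Jinja's "do" statement,
which is only available when the environment enables jinja2.ext.do (the
templater must enable it for this test to pass). A self-contained
illustration with plain Jinja2, independent of cloud-init:

    from jinja2 import Environment

    # The "do" tag must be enabled explicitly via the extension.
    env = Environment(extensions=["jinja2.ext.do"])
    tmpl = env.from_string(
        "{% set r = [] %}{% for i in [1, 2, 3] %}"
        "{% do r.append(i) %}{% endfor %}{{ r }}"
    )
    print(tmpl.render())  # -> [1, 2, 3]
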
diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py
index 85810e00..a9b9a85f 100644
--- a/tests/unittests/test_url_helper.py
+++ b/tests/unittests/test_url_helper.py
@@ -1,18 +1,25 @@
# This file is part of cloud-init. See LICENSE file for license information.
import logging
+from functools import partial
+from threading import Event
+from time import process_time
import httpretty
+import pytest
import requests
+import responses
from cloudinit import util, version
from cloudinit.url_helper import (
NOT_FOUND,
REDACTED,
UrlError,
+ dual_stack,
oauth_headers,
read_file_or_url,
retry_on_url_exc,
+ wait_for_url,
)
from tests.unittests.helpers import CiTestCase, mock, skipIf
@@ -116,27 +123,6 @@ class TestReadFileOrUrl(CiTestCase):
self.assertNotIn(REDACTED, logs)
self.assertIn("sekret", logs)
- @mock.patch(M_PATH + "readurl")
- def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
- """read_file_or_url passes all params through to readurl."""
- url = "http://hostname/path"
- response = "This is my url content\n"
- m_readurl.return_value = response
- params = {
- "url": url,
- "timeout": 1,
- "retries": 2,
- "headers": {"somehdr": "val"},
- "data": "data",
- "sec_between": 1,
- "ssl_details": {"cert_file": "/path/cert.pem"},
- "headers_cb": "headers_cb",
- "exception_cb": "exception_cb",
- }
- self.assertEqual(response, read_file_or_url(**params))
- params.pop("url") # url is passed in as a positional arg
- self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list)
-
def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self):
"""Readurl param defaults used when unspecified by read_file_or_url
@@ -178,6 +164,79 @@ class TestReadFileOrUrl(CiTestCase):
self.assertEqual(m_response, response._response)
+class TestReadFileOrUrlParameters:
+ @mock.patch(M_PATH + "readurl")
+ @pytest.mark.parametrize(
+ "timeout", [1, 1.2, "1", (1, None), (1, 1), (None, None)]
+ )
+ def test_read_file_or_url_passes_params_to_readurl(
+ self, m_readurl, timeout
+ ):
+ """read_file_or_url passes all params through to readurl."""
+ url = "http://hostname/path"
+ response = "This is my url content\n"
+ m_readurl.return_value = response
+ params = {
+ "url": url,
+ "timeout": timeout,
+ "retries": 2,
+ "headers": {"somehdr": "val"},
+ "data": "data",
+ "sec_between": 1,
+ "ssl_details": {"cert_file": "/path/cert.pem"},
+ "headers_cb": "headers_cb",
+ "exception_cb": "exception_cb",
+ }
+
+ assert response == read_file_or_url(**params)
+ params.pop("url") # url is passed in as a positional arg
+ assert m_readurl.call_args_list == [mock.call(url, **params)]
+
+ @pytest.mark.parametrize(
+ "readurl_timeout,request_timeout",
+ [
+ (-1, 0),
+ ("-1", 0),
+ (None, None),
+ (1, 1.0),
+ (1.2, 1.2),
+ ("1", 1.0),
+ ((1, None), (1, None)),
+ ((1, 1), (1, 1)),
+ ((None, None), (None, None)),
+ ],
+ )
+ def test_readurl_timeout(self, readurl_timeout, request_timeout):
+ url = "http://hostname/path"
+ m_response = mock.MagicMock()
+
+ class FakeSession(requests.Session):
+ @classmethod
+ def request(cls, **kwargs):
+ expected_kwargs = {
+ "url": url,
+ "allow_redirects": True,
+ "method": "GET",
+ "headers": {
+ "User-Agent": "Cloud-Init/%s"
+ % (version.version_string())
+ },
+ "timeout": request_timeout,
+ }
+ if request_timeout is None:
+ expected_kwargs.pop("timeout")
+
+ assert kwargs == expected_kwargs
+ return m_response
+
+ with mock.patch(
+ M_PATH + "requests.Session", side_effect=[FakeSession()]
+ ):
+ response = read_file_or_url(url, timeout=readurl_timeout)
+
+ assert response._response == m_response
+
+
class TestRetryOnUrlExc(CiTestCase):
def test_do_not_retry_non_urlerror(self):
"""When exception is not UrlError return False."""
@@ -197,4 +256,307 @@ class TestRetryOnUrlExc(CiTestCase):
self.assertTrue(retry_on_url_exc(msg="", exc=myerror))
+def assert_time(func, max_time=1):
+ """Assert function time is bounded by a max (default=1s)
+
+    The following async tests should complete well within max_time, given
+    their stagger delays and timeouts.
+ It is possible that this could yield a false positive, but this should
+ basically never happen (esp under normal system load).
+ """
+ start = process_time()
+ try:
+ out = func()
+ finally:
+ diff = process_time() - start
+ assert diff < max_time
+ return out
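
Usage, mirroring the tests below: pre-bind the arguments with
functools.partial so the helper times only the call under test. Because
process_time() measures CPU time, threads parked in event.wait() do not
count against max_time.

    from functools import partial

    gen = partial(dual_stack, func, addresses, stagger_delay=0, timeout=1)
    _, result = assert_time(gen)  # AssertionError if CPU time >= max_time
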
+
+
+event = Event()
+
+
+class TestDualStack:
+ """Async testing suggestions welcome - these all rely on time-bounded
+ assertions (via threading.Event) to prove ordering
+ """
+
+ @pytest.mark.parametrize(
+ "func," "addresses," "stagger_delay," "timeout," "expected_val,",
+ [
+ # Assert order based on timeout
+ (lambda x, _: x, ("one", "two"), 1, 1, "one"),
+ # Assert timeout results in (None, None)
+ (lambda _a, _b: event.wait(1), ("one", "two"), 1, 0, None),
+ (
+ lambda a, _b: 1 / 0 if a == "one" else a,
+ ("one", "two"),
+ 0,
+ 1,
+ "two",
+ ),
+ # Assert that exception in func is only raised
+ # if neither thread gets a valid result
+ (
+ lambda a, _b: 1 / 0 if a == "two" else a,
+ ("one", "two"),
+ 0,
+ 1,
+ "one",
+ ),
+ # simulate a slow response to verify correct order
+ (
+ lambda x, _: event.wait(1) if x != "two" else x,
+ ("one", "two"),
+ 0,
+ 1,
+ "two",
+ ),
+ # simulate a slow response to verify correct order
+ (
+ lambda x, _: event.wait(1) if x != "tri" else x,
+ ("one", "two", "tri"),
+ 0,
+ 1,
+ "tri",
+ ),
+ ],
+ )
+ def test_dual_stack(
+ self,
+ func,
+ addresses,
+ stagger_delay,
+ timeout,
+ expected_val,
+ ):
+ """Assert various failure modes behave as expected"""
+ event.clear()
+
+ gen = partial(
+ dual_stack,
+ func,
+ addresses,
+ stagger_delay=stagger_delay,
+ timeout=timeout,
+ )
+ _, result = assert_time(gen)
+ assert expected_val == result
+
+ event.set()
+
+ @pytest.mark.parametrize(
+ "func,"
+ "addresses,"
+ "stagger_delay,"
+ "timeout,"
+ "message,"
+ "expected_exc",
+ [
+ (
+ lambda _a, _b: 1 / 0,
+ ("¯\\_(ツ)_/¯", "(╯°□°)╯︵ ┻━┻"),
+ 0,
+ 1,
+ "division by zero",
+ ZeroDivisionError,
+ ),
+ (
+ lambda _a, _b: 1 / 0,
+ ("it", "really", "doesn't"),
+ 0,
+ 1,
+ "division by zero",
+ ZeroDivisionError,
+ ),
+ (
+ lambda _a, _b: [][0],
+ ("matter", "these"),
+ 0,
+ 1,
+ "list index out of range",
+ IndexError,
+ ),
+ (
+ lambda _a, _b: (_ for _ in ()).throw(
+ Exception("soapstone is not effective soap")
+ ),
+ ("are", "ignored"),
+ 0,
+ 1,
+ "soapstone is not effective soap",
+ Exception,
+ ),
+ ],
+ )
+ def test_dual_stack_exceptions(
+ self,
+ func,
+ addresses,
+ stagger_delay,
+ timeout,
+ message,
+ expected_exc,
+ caplog,
+ ):
+ # Context:
+ #
+        # Currently, if all threads experience an exception,
+        # dual_stack() logs an error containing all exceptions,
+        # but only raises the last exception to occur.
+        # Verify "best effort" behavior:
+        # dual_stack will temporarily ignore an exception in any of the
+        # request threads, in hopes that a later thread will succeed.
+        # This behavior is intended to allow a requests.ConnectionError
+        # exception from one endpoint to occur without preventing another
+        # thread from succeeding.
+ event.clear()
+
+        # Note: python3.6 repr(Exception("test")) produces different output
+        # than later versions, so we cannot match the exact message without
+        # some ugly manual exception repr() function, which I'd rather not
+        # do in dual_stack(), so we recreate the expected messages manually
+        # here in a version-independent way for testing; the extra comma on
+        # old versions won't hurt anything.
+ exc_list = str([expected_exc(message) for _ in addresses])
+ expected_msg = f"Exception(s) {exc_list} during request"
+ gen = partial(
+ dual_stack,
+ func,
+ addresses,
+ stagger_delay=stagger_delay,
+ timeout=timeout,
+ )
+ with pytest.raises(expected_exc):
+ gen() # 1
+ with caplog.at_level(logging.DEBUG):
+ try:
+ gen() # 2
+ except expected_exc:
+ pass
+ finally:
+ assert 2 == len(caplog.records)
+ assert 2 == caplog.text.count(expected_msg)
+ event.set()
+
+ def test_dual_stack_staggered(self):
+ """Assert expected call intervals occur"""
+ stagger = 0.1
+ with mock.patch(M_PATH + "_run_func_with_delay") as delay_func:
+ dual_stack(
+ lambda x, _y: x,
+ ["you", "and", "me", "and", "dog"],
+ stagger_delay=stagger,
+ timeout=1,
+ )
+
+        # ensure that the stagger delay for each subsequent call is:
+        # [0 * N, 1 * N, 2 * N, 3 * N, 4 * N] where N = stagger
+        # (five addresses, so five calls); it appears that without an
+        # explicit wait/join we can't assert the number of calls
+ for delay, call_item in enumerate(delay_func.call_args_list):
+ _, kwargs = call_item
+ assert stagger * delay == kwargs.get("delay")
+
+
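
For orientation, the staggered first-success pattern TestDualStack
exercises can be sketched with the standard library alone. This is an
illustration of the idea, not cloud-init's dual_stack implementation:
unlike the real function, it waits on straggler threads at pool
shutdown, and a timeout raises TimeoutError instead of yielding
(None, None).

    import time
    from concurrent.futures import ThreadPoolExecutor, as_completed

    def staggered_race(func, addresses, stagger_delay=0.15, timeout=1.0):
        """Return the first successful func(addr); raise once all fail."""

        def delayed(addr, delay):
            time.sleep(delay)
            return func(addr)

        errors = []
        with ThreadPoolExecutor(max_workers=len(addresses)) as pool:
            futures = [
                pool.submit(delayed, addr, i * stagger_delay)
                for i, addr in enumerate(addresses)
            ]
            for future in as_completed(futures, timeout=timeout):
                try:
                    return future.result()  # first success wins
                except Exception as exc:  # best effort: others may succeed
                    errors.append(exc)
        raise errors[-1]  # all failed: surface only the last exception
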
+ADDR1 = "https://addr1/"
+SLEEP1 = "https://sleep1/"
+SLEEP2 = "https://sleep2/"
+
+
+class TestUrlHelper:
+ success = "SUCCESS"
+ fail = "FAIL"
+ event = Event()
+
+ @classmethod
+ def response_wait(cls, _request):
+ cls.event.wait(0.1)
+ return (500, {"request-id": "1"}, cls.fail)
+
+ @classmethod
+ def response_nowait(cls, _request):
+ return (200, {"request-id": "0"}, cls.success)
+
+ @pytest.mark.parametrize(
+ "addresses," "expected_address_index," "response,",
+ [
+ # Use timeout to test ordering happens as expected
+ ((ADDR1, SLEEP1), 0, "SUCCESS"),
+ ((SLEEP1, ADDR1), 1, "SUCCESS"),
+ ((SLEEP1, SLEEP2, ADDR1), 2, "SUCCESS"),
+ ((ADDR1, SLEEP1, SLEEP2), 0, "SUCCESS"),
+ ],
+ )
+ @responses.activate
+ def test_order(self, addresses, expected_address_index, response):
+        """Check that the first response gets returned. Simulate a
+        non-responding endpoint with a callback that waits on an event
+        (see response_wait).
+
+        If this test proves flaky, increase the wait time. Since it is
+        async, increasing the wait time for the non-responding endpoint
+        should not increase total test time, assuming async_delay=0 is
+        used and at least one non-waiting endpoint is registered with
+        responses. Subsequent tests will continue execution after the
+        first response is received.
+ """
+ self.event.clear()
+ for address in set(addresses):
+ responses.add_callback(
+ responses.GET,
+ address,
+ callback=(
+ self.response_wait
+ if "sleep" in address
+ else self.response_nowait
+ ),
+ content_type="application/json",
+ )
+
+ # Use async_delay=0.0 to avoid adding unnecessary time to tests
+ # In practice a value such as 0.150 is used
+ url, response_contents = wait_for_url(
+ urls=addresses,
+ max_wait=1,
+ timeout=1,
+ connect_synchronously=False,
+ async_delay=0.0,
+ )
+ self.event.set()
+
+ # Test for timeout (no responding endpoint)
+ assert addresses[expected_address_index] == url
+ assert response.encode() == response_contents
+
+ @responses.activate
+ def test_timeout(self):
+ """If no endpoint responds in time, expect no response"""
+
+ self.event.clear()
+ addresses = [SLEEP1, SLEEP2]
+ for address in set(addresses):
+ responses.add_callback(
+ responses.GET,
+ address,
+ callback=(
+ self.response_wait
+ if "sleep" in address
+ else self.response_nowait
+ ),
+ content_type="application/json",
+ )
+
+ # Use async_delay=0.0 to avoid adding unnecessary time to tests
+ url, response_contents = wait_for_url(
+ urls=addresses,
+ max_wait=1,
+ timeout=1,
+ connect_synchronously=False,
+ async_delay=0,
+ )
+ self.event.set()
+ assert not url
+ assert not response_contents
+
+
# vi: ts=4 expandtab
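
For reference, every knob these tests exercise is a keyword argument to
wait_for_url; a minimal caller of the async path looks like the
following (the metadata URLs are placeholders):

    from cloudinit.url_helper import wait_for_url

    url, contents = wait_for_url(
        urls=["http://192.0.2.1/", "http://[2001:db8::1]/"],  # placeholders
        max_wait=1,  # overall deadline in seconds
        timeout=1,  # per-request timeout
        connect_synchronously=False,  # race addresses dual-stack style
        async_delay=0.15,  # stagger between connection attempts
    )
    if not url:
        print("no endpoint responded within max_wait")
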
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 3f3079b0..bcb63787 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -3,6 +3,7 @@
"""Tests for cloudinit.util"""
import base64
+import errno
import io
import json
import logging
@@ -12,17 +13,21 @@ import re
import shutil
import stat
import tempfile
+from collections import deque
from textwrap import dedent
+from typing import Tuple
from unittest import mock
import pytest
import yaml
from cloudinit import importer, subp, util
+from cloudinit.subp import SubpResult
from tests.unittests import helpers
from tests.unittests.helpers import CiTestCase
LOG = logging.getLogger(__name__)
+M_PATH = "cloudinit.util."
MOUNT_INFO = [
"68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64",
@@ -335,30 +340,108 @@ class FakeCloud(object):
return self.hostname
-class TestUtil(CiTestCase):
+class TestUtil:
def test_parse_mount_info_no_opts_no_arg(self):
result = util.parse_mount_info("/home", MOUNT_INFO, LOG)
- self.assertEqual(("/dev/sda2", "xfs", "/home"), result)
+ assert ("/dev/sda2", "xfs", "/home") == result
def test_parse_mount_info_no_opts_arg(self):
result = util.parse_mount_info("/home", MOUNT_INFO, LOG, False)
- self.assertEqual(("/dev/sda2", "xfs", "/home"), result)
+ assert ("/dev/sda2", "xfs", "/home") == result
def test_parse_mount_info_with_opts(self):
result = util.parse_mount_info("/", MOUNT_INFO, LOG, True)
- self.assertEqual(("/dev/sda1", "btrfs", "/", "ro,relatime"), result)
+ assert ("/dev/sda1", "btrfs", "/", "ro,relatime") == result
- @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch(M_PATH + "get_mount_info")
def test_mount_is_rw(self, m_mount_info):
m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "rw,relatime")
is_rw = util.mount_is_read_write("/")
- self.assertEqual(is_rw, True)
+ assert is_rw is True
- @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch(M_PATH + "get_mount_info")
def test_mount_is_ro(self, m_mount_info):
m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "ro,relatime")
is_rw = util.mount_is_read_write("/")
- self.assertEqual(is_rw, False)
+ assert is_rw is False
+
+ @mock.patch(
+ M_PATH + "read_conf",
+ side_effect=(OSError(errno.EACCES, "Not allowed"), {"0": "0"}),
+ )
+ def test_read_conf_d_no_permissions(
+ self, m_read_conf, caplog, capsys, tmpdir
+ ):
+        """If a user lacks read permission on a config file, there is no
+        exception and no stderr output; the user is informed via logging
+        warnings instead.
+
+        Note: This is used in cmd, so we want to keep the invariant of not
+        writing file-permission errors to the console; they go to the log.
+        """
+ confs = []
+ for i in range(2):
+ confs.append(tmpdir.join(f"conf-{i}.cfg"))
+ confs[i].write("{}")
+ assert {"0": "0"} == util.read_conf_d(tmpdir)
+ assert (
+ caplog.text.count(
+ f"REDACTED config part {tmpdir}/conf-1.cfg for non-root user"
+ )
+ == 1
+ )
+ assert m_read_conf.call_count == 2
+ out, err = capsys.readouterr()
+ assert not out
+ assert not err
+
+ @pytest.mark.parametrize(
+ "create_confd,expected_call",
+ [
+ (False, mock.call(deque())),
+ (True, mock.call(deque([{"my_config": "foo"}]))),
+ ],
+ )
+ @mock.patch(M_PATH + "mergemanydict")
+ @mock.patch(M_PATH + "read_conf_d", return_value={"my_config": "foo"})
+ @mock.patch(
+ M_PATH + "read_conf", side_effect=OSError(errno.EACCES, "Not allowed")
+ )
+ def test_read_conf_with_confd_no_permissions(
+ self,
+ m_read_conf,
+ m_read_confd,
+ m_mergemanydict,
+ create_confd,
+ expected_call,
+ caplog,
+ capsys,
+ tmpdir,
+ ):
+ """Read a conf file without permission.
+
+        Stdout and stderr are empty and the user is informed via logging
+        warnings.
+
+        Note: This is used in cmd, so we want to keep the invariant of not
+        writing file-permission errors to the console; they go to the log.
+ """
+ conf_fn = tmpdir.join("conf.cfg")
+ if create_confd:
+ confd_fn = tmpdir.mkdir("conf.cfg.d")
+ util.read_conf_with_confd(conf_fn)
+ assert (
+ caplog.text.count(
+ f"REDACTED config part {conf_fn} for non-root user"
+ )
+ == 1
+ )
+ assert m_read_conf.call_count == 1
+ out, err = capsys.readouterr()
+ assert not out
+ assert not err
+ if create_confd:
+ assert [mock.call(confd_fn)] == m_read_confd.call_args_list
+ assert [expected_call] == m_mergemanydict.call_args_list
class TestSymlink(CiTestCase):
@@ -376,13 +459,16 @@ class TestSymlink(CiTestCase):
tmpd = self.tmp_dir()
link = self.tmp_path("link", tmpd)
target = self.tmp_path("target", tmpd)
+ target2 = self.tmp_path("target2", tmpd)
util.write_file(target, "hello")
+ util.write_file(target2, "hello2")
util.sym_link(target, link)
self.assertTrue(os.path.exists(link))
- util.sym_link(target, link, force=True)
+ util.sym_link(target2, link, force=True)
self.assertTrue(os.path.exists(link))
+ self.assertEqual("hello2", util.load_file(link))
def test_sym_link_dangling_link(self):
tmpd = self.tmp_dir()
@@ -408,9 +494,9 @@ class TestSymlink(CiTestCase):
class TestUptime(CiTestCase):
- @mock.patch("cloudinit.util.boottime")
- @mock.patch("cloudinit.util.os.path.exists")
- @mock.patch("cloudinit.util.time.time")
+ @mock.patch(M_PATH + "boottime")
+ @mock.patch(M_PATH + "os.path.exists")
+ @mock.patch(M_PATH + "time.time")
def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime):
boottime = 1000.0
uptime = 10.0
@@ -590,7 +676,7 @@ class TestBlkid(CiTestCase):
@mock.patch("cloudinit.subp.subp")
def test_functional_blkid(self, m_subp):
- m_subp.return_value = (self.blkid_out.format(**self.ids), "")
+ m_subp.return_value = SubpResult(self.blkid_out.format(**self.ids), "")
self.assertEqual(self._get_expected(), util.blkid())
m_subp.assert_called_with(
["blkid", "-o", "full"], capture=True, decode="replace"
@@ -599,7 +685,7 @@ class TestBlkid(CiTestCase):
@mock.patch("cloudinit.subp.subp")
def test_blkid_no_cache_uses_no_cache(self, m_subp):
"""blkid should turn off cache if disable_cache is true."""
- m_subp.return_value = (self.blkid_out.format(**self.ids), "")
+ m_subp.return_value = SubpResult(self.blkid_out.format(**self.ids), "")
self.assertEqual(self._get_expected(), util.blkid(disable_cache=True))
m_subp.assert_called_with(
["blkid", "-o", "full", "-c", "/dev/null"],
@@ -684,7 +770,7 @@ class TestGetLinuxDistro(CiTestCase):
if path == "/etc/redhat-release":
return 1
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file has
the distro name in quotes"""
@@ -693,7 +779,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("sles", "12.3", platform.machine()), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file does not
have the distro name in quotes"""
@@ -704,7 +790,7 @@ class TestGetLinuxDistro(CiTestCase):
@mock.patch("platform.system")
@mock.patch("platform.release")
- @mock.patch("cloudinit.util._parse_redhat_release")
+ @mock.patch(M_PATH + "_parse_redhat_release")
def test_get_linux_freebsd(
self,
m_parse_redhat_release,
@@ -721,7 +807,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("freebsd", "12.0-RELEASE-p10", ""), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_centos6(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on CentOS 6."""
m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
@@ -729,7 +815,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("centos", "6.10", "Final"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
"""Verify the correct release info on CentOS 7 without os-release."""
m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
@@ -737,7 +823,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("centos", "7.5.1804", "Core"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
"""Verify redhat 7 read from os-release."""
m_os_release.return_value = OS_RELEASE_REDHAT_7
@@ -745,7 +831,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("redhat", "7.5", "Maipo"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
"""Verify redhat 7 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
@@ -753,7 +839,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("redhat", "7.5", "Maipo"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
"""Verify redhat 6 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
@@ -761,7 +847,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("redhat", "6.10", "Santiago"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on COPR CentOS."""
m_os_release.return_value = OS_RELEASE_CENTOS
@@ -769,7 +855,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("centos", "7", "Core"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists):
"""Verify almalinux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8
@@ -777,7 +863,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists):
"""Verify almalinux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_ALMALINUX_8
@@ -785,7 +871,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists):
"""Verify eurolinux 7 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7
@@ -793,7 +879,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("eurolinux", "7.9", "Minsk"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists):
"""Verify eurolinux 7 read from os-release."""
m_os_release.return_value = OS_RELEASE_EUROLINUX_7
@@ -801,7 +887,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("eurolinux", "7.9", "Minsk"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists):
"""Verify eurolinux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8
@@ -809,7 +895,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists):
"""Verify eurolinux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_EUROLINUX_8
@@ -817,7 +903,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_miraclelinux8_rhrelease(
self, m_os_release, m_path_exists
):
@@ -827,7 +913,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("miracle", "8.4", "Peony"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_miraclelinux8_osrelease(
self, m_os_release, m_path_exists
):
@@ -837,7 +923,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("miraclelinux", "8", "Peony"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists):
"""Verify rocky linux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_ROCKY_8
@@ -845,7 +931,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists):
"""Verify rocky linux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_ROCKY_8
@@ -853,7 +939,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists):
"""Verify virtuozzo linux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8
@@ -861,7 +947,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists):
"""Verify virtuozzo linux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8
@@ -869,7 +955,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists):
"""Verify cloudlinux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8
@@ -877,7 +963,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists):
"""Verify cloudlinux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8
@@ -885,7 +971,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_debian(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on Debian."""
m_os_release.return_value = OS_RELEASE_DEBIAN
@@ -893,7 +979,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("debian", "9", "stretch"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_openeuler(self, m_os_release, m_path_exists):
"""Verify get the correct name and release name on Openeuler."""
m_os_release.return_value = OS_RELEASE_OPENEULER_20
@@ -901,7 +987,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("openEuler", "20.03", "LTS-SP2"), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_opensuse(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on openSUSE
prior to openSUSE Leap 15.
@@ -911,7 +997,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("opensuse", "42.3", platform.machine()), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on openSUSE
for openSUSE Leap 15.0 and later.
@@ -921,7 +1007,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("opensuse-leap", "15.0", platform.machine()), dist)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on openSUSE
for openSUSE Tumbleweed
@@ -933,7 +1019,7 @@ class TestGetLinuxDistro(CiTestCase):
("opensuse-tumbleweed", "20180920", platform.machine()), dist
)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_photon_os_release(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on PhotonOS"""
m_os_release.return_value = OS_RELEASE_PHOTON
@@ -1055,6 +1141,9 @@ class TestIsLXD(CiTestCase):
class TestReadCcFromCmdline:
+
+ random_string: Tuple
+
if hasattr(pytest, "param"):
random_string = pytest.param(
CiTestCase.random_string(), None, id="random_string"
@@ -1178,8 +1267,8 @@ class TestMountCb:
"""Mock an already-mounted device, and yield (device, mount dict)"""
device = "/dev/fake0"
mountpoint = "/mnt/fake"
- with mock.patch("cloudinit.util.subp.subp"):
- with mock.patch("cloudinit.util.mounts") as m_mounts:
+ with mock.patch(M_PATH + "subp.subp"):
+ with mock.patch(M_PATH + "mounts") as m_mounts:
mounts = {device: {"mountpoint": mountpoint}}
m_mounts.return_value = mounts
yield device, mounts[device]
@@ -1202,9 +1291,9 @@ class TestMountCb:
("ufs", "ufs"),
],
)
- @mock.patch("cloudinit.util.is_Linux", autospec=True)
- @mock.patch("cloudinit.util.is_BSD", autospec=True)
- @mock.patch("cloudinit.util.subp.subp")
+ @mock.patch(M_PATH + "is_Linux", autospec=True)
+ @mock.patch(M_PATH + "is_BSD", autospec=True)
+ @mock.patch(M_PATH + "subp.subp")
@mock.patch("cloudinit.temp_utils.tempdir", autospec=True)
def test_normalize_mtype_on_bsd(
self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected
@@ -1241,7 +1330,7 @@ class TestMountCb:
with pytest.raises(TypeError):
util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
- @mock.patch("cloudinit.util.subp.subp")
+ @mock.patch(M_PATH + "subp.subp")
def test_already_mounted_does_not_mount_or_umount_anything(
self, m_subp, already_mounted_device
):
@@ -1277,7 +1366,7 @@ class TestMountCb:
] == callback.call_args_list
-@mock.patch("cloudinit.util.write_file")
+@mock.patch(M_PATH + "write_file")
class TestEnsureFile:
"""Tests for ``cloudinit.util.ensure_file``."""
@@ -1322,9 +1411,9 @@ class TestEnsureFile:
assert "ab" == kwargs["omode"]
-@mock.patch("cloudinit.util.grp.getgrnam")
-@mock.patch("cloudinit.util.os.setgid")
-@mock.patch("cloudinit.util.os.umask")
+@mock.patch(M_PATH + "grp.getgrnam")
+@mock.patch(M_PATH + "os.setgid")
+@mock.patch(M_PATH + "os.umask")
class TestRedirectOutputPreexecFn:
"""This tests specifically the preexec_fn used in redirect_output."""
@@ -1340,7 +1429,7 @@ class TestRedirectOutputPreexecFn:
args = (test_string, None)
elif request.param == "errfmt":
args = (None, test_string)
- with mock.patch("cloudinit.util.subprocess.Popen") as m_popen:
+ with mock.patch(M_PATH + "subprocess.Popen") as m_popen:
util.redirect_output(*args)
assert 1 == m_popen.call_count
@@ -1774,7 +1863,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
expected = ("none", "tmpfs", "/run/lock")
self.assertEqual(expected, util.parse_mount_info("/run/lock", lines))
- @mock.patch("cloudinit.util.os")
+ @mock.patch(M_PATH + "os")
@mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool(self, zpool_output, m_os):
# mock /dev/zfs exists
@@ -1790,7 +1879,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
self.assertIsNotNone(ret)
m_os.path.exists.assert_called_with("/dev/zfs")
- @mock.patch("cloudinit.util.os")
+ @mock.patch(M_PATH + "os")
def test_get_device_info_from_zpool_no_dev_zfs(self, m_os):
# mock /dev/zfs missing
m_os.path.exists.return_value = False
@@ -1798,7 +1887,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
ret = util.get_device_info_from_zpool("vmzroot")
self.assertIsNone(ret)
- @mock.patch("cloudinit.util.os")
+ @mock.patch(M_PATH + "os")
@mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os):
"""Handle case where there is no zpool command"""
@@ -1808,7 +1897,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
ret = util.get_device_info_from_zpool("vmzroot")
self.assertIsNone(ret)
- @mock.patch("cloudinit.util.os")
+ @mock.patch(M_PATH + "os")
@mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os):
# mock /dev/zfs exists
@@ -1875,7 +1964,7 @@ class TestIsX86(helpers.CiTestCase):
util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch
)
- @mock.patch("cloudinit.util.os.uname")
+ @mock.patch(M_PATH + "os.uname")
def test_is_x86_calls_uname_for_architecture(self, m_uname):
"""is_x86 returns True if platform from uname matches."""
m_uname.return_value = [0, 1, 2, 3, "x86_64"]
@@ -1982,6 +2071,33 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
util.multi_log("something", fallback_to_stdout=False)
self.assertEqual("", self.stdout.getvalue())
+ @mock.patch(
+ M_PATH + "write_to_console",
+ mock.Mock(side_effect=OSError("Failed to write to console")),
+ )
+ def test_logs_go_to_stdout_if_writing_to_console_fails_and_fallback_true(
+ self,
+ ):
+ self._createConsole(self.root)
+ util.multi_log("something", fallback_to_stdout=True)
+ self.assertEqual(
+ "Failed to write to /dev/console\nsomething",
+ self.stdout.getvalue(),
+ )
+
+ @mock.patch(
+ M_PATH + "write_to_console",
+ mock.Mock(side_effect=OSError("Failed to write to console")),
+ )
+ def test_logs_go_nowhere_if_writing_to_console_fails_and_fallback_false(
+ self,
+ ):
+ self._createConsole(self.root)
+ util.multi_log("something", fallback_to_stdout=False)
+ self.assertEqual(
+ "Failed to write to /dev/console\n", self.stdout.getvalue()
+ )
+
def test_logs_go_to_log_if_given(self):
log = mock.MagicMock()
logged_string = "something very important"
@@ -2179,7 +2295,7 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
- @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch(M_PATH + "get_cmdline")
def test_bad_content_in_os_release_no_effect(self, m_cmdline):
"""malformed os-release should not raise exception."""
m_cmdline.return_value = "root=/dev/sda"
@@ -2189,7 +2305,7 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
self.reRoot()
self.assertFalse(util.system_is_snappy())
- @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch(M_PATH + "get_cmdline")
def test_snap_core_in_cmdline_is_snappy(self, m_cmdline):
"""The string snap_core= in kernel cmdline indicates snappy."""
cmdline = (
@@ -2202,7 +2318,7 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
self.assertTrue(util.system_is_snappy())
self.assertTrue(m_cmdline.call_count > 0)
- @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch(M_PATH + "get_cmdline")
def test_nothing_found_is_not_snappy(self, m_cmdline):
"""If no positive identification, then not snappy."""
m_cmdline.return_value = "root=/dev/sda"
@@ -2210,7 +2326,7 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
self.assertFalse(util.system_is_snappy())
self.assertTrue(m_cmdline.call_count > 0)
- @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch(M_PATH + "get_cmdline")
def test_channel_ini_with_snappy_is_snappy(self, m_cmdline):
"""A Channel.ini file with 'ubuntu-core' indicates snappy."""
m_cmdline.return_value = "root=/dev/sda"
@@ -2220,7 +2336,7 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
- @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch(M_PATH + "get_cmdline")
def test_system_image_config_dir_is_snappy(self, m_cmdline):
"""Existence of /etc/system-image/config.d indicates snappy."""
m_cmdline.return_value = "root=/dev/sda"
@@ -2265,7 +2381,7 @@ class TestGetProcEnv(helpers.TestCase):
# return the value portion of key=val decoded.
return blob.split(b"=", 1)[1].decode(encoding, errors)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_non_utf8_in_environment(self, m_load_file):
"""env may have non utf-8 decodable content."""
content = self.null.join(
@@ -2284,7 +2400,7 @@ class TestGetProcEnv(helpers.TestCase):
)
self.assertEqual(1, m_load_file.call_count)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_encoding_none_returns_bytes(self, m_load_file):
"""encoding none returns bytes."""
lines = (self.bootflag, self.simple1, self.simple2, self.mixed)
@@ -2297,7 +2413,7 @@ class TestGetProcEnv(helpers.TestCase):
)
self.assertEqual(1, m_load_file.call_count)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_all_utf8_encoded(self, m_load_file):
"""common path where only utf-8 decodable content."""
content = self.null.join((self.simple1, self.simple2))
@@ -2307,7 +2423,7 @@ class TestGetProcEnv(helpers.TestCase):
)
self.assertEqual(1, m_load_file.call_count)
- @mock.patch("cloudinit.util.load_file")
+ @mock.patch(M_PATH + "load_file")
def test_non_existing_file_returns_empty_dict(self, m_load_file):
"""as implemented, a non-existing pid returns empty dict.
This is how it was originally implemented."""
@@ -2358,13 +2474,17 @@ class TestFindDevs:
@mock.patch("cloudinit.subp.subp")
def test_find_devs_with_openbsd(self, m_subp):
- m_subp.return_value = ("cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", "")
+ m_subp.return_value = SubpResult(
+ "cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", ""
+ )
devlist = util.find_devs_with_openbsd()
assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"]
@mock.patch("cloudinit.subp.subp")
def test_find_devs_with_openbsd_with_criteria(self, m_subp):
- m_subp.return_value = ("cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", "")
+ m_subp.return_value = SubpResult(
+ "cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", ""
+ )
devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660")
assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"]
@@ -2412,29 +2532,29 @@ class TestFindDevs:
@mock.patch("cloudinit.subp.subp")
def test_find_devs_with_netbsd(self, m_subp, criteria, expected_devlist):
side_effect_values = [
- ("ld0 dk0 dk1 cd0", ""),
- (
+ SubpResult("ld0 dk0 dk1 cd0", ""),
+ SubpResult(
"mscdlabel: CDIOREADTOCHEADER: "
"Inappropriate ioctl for device\n"
"track (ctl=4) at sector 0\n"
"disklabel not written\n",
"",
),
- (
+ SubpResult(
"mscdlabel: CDIOREADTOCHEADER: "
"Inappropriate ioctl for device\n"
"track (ctl=4) at sector 0\n"
"disklabel not written\n",
"",
),
- (
+ SubpResult(
"mscdlabel: CDIOREADTOCHEADER: "
"Inappropriate ioctl for device\n"
"track (ctl=4) at sector 0\n"
"disklabel not written\n",
"",
),
- (
+ SubpResult(
"track (ctl=4) at sector 0\n"
'ISO filesystem, label "config-2", '
"creation time: 2020/03/31 17:29\n"
@@ -2462,7 +2582,9 @@ class TestFindDevs:
def test_find_devs_with_dragonflybsd(
self, m_subp, criteria, expected_devlist
):
- m_subp.return_value = ("md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0", "")
+ m_subp.return_value = SubpResult(
+ "md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0", ""
+ )
devlist = util.find_devs_with_dragonflybsd(criteria=criteria)
assert devlist == expected_devlist
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
index 79a6e1d0..f57a3d25 100644
--- a/tests/unittests/util.py
+++ b/tests/unittests/util.py
@@ -84,9 +84,6 @@ class MockDistro(distros.Distro):
def apply_network_config(self, netconfig, bring_up=False) -> bool:
return False
- def apply_network_config_names(self, netconfig):
- pass
-
def apply_locale(self, locale, out_fn=None):
pass
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 0567a89d..cd7efbd4 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -1,3 +1,4 @@
+aciba90
ader1990
ajmyyra
akutz
@@ -6,6 +7,7 @@ Aman306
andgein
andrewbogott
andrewlukoshko
+andrew-lee-metaswitch
antonyc
aswinrajamannar
beantaxi
@@ -20,12 +22,15 @@ chrislalos
ciprianbadescu
citrus-it
cjp256
+Conan-Kudo
+cvstealth
dankenigsberg
ddymko
dermotbradley
dhensby
eandersson
eb3095
+edudobay
emmanuelthome
eslerm
esposem
@@ -37,6 +42,7 @@ impl
irishgordo
izzyleung
j5awry
+jf
Jille
JohnKepplers
johnsonshi
@@ -44,6 +50,7 @@ jordimassaguerpla
jqueuniet
jsf9k
jshen28
+kallioli
klausenbusk
KsenijaS
landon912
@@ -58,6 +65,7 @@ marlluslustosa
matthewruffell
maxnet
megian
+michaelrommel
mitechie
nazunalika
nicolasbock
@@ -65,11 +73,13 @@ nishigori
olivierlemasle
omBratteng
onitake
+Oursin
qubidt
renanrodrigo
rhansen
riedel
sarahwzadara
+shi2wei3
slingamn
slyon
smoser
@@ -79,6 +89,7 @@ steverweber
t-8ch
TheRealFalcon
taoyama
+thetoolsmith
timothegenzmer
tnt-dev
tomponline
@@ -88,6 +99,8 @@ vorlonofportland
vteratipally
Vultaire
WebSpider
+wschoot
xiachen-rh
xnox
+yangzz-97
zhuzaifangxuele
diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 9b09d568..4df62976 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,4 +1,5 @@
{
+ "adam-collard": "sparkiegeek",
"adobrawy": "ad-m",
"afranceschini": "andreaf74",
"ahosmanmsft": "AOhassan",
@@ -33,4 +34,4 @@
"vlastimil-holer": "vholer",
"vtqanh": "anhvoms",
"xiaofengw": "xiaofengw-vmware"
-}
+}
\ No newline at end of file
diff --git a/tools/benchmark.sh b/tools/benchmark.sh
new file mode 100755
index 00000000..c382f374
--- /dev/null
+++ b/tools/benchmark.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/bash
+
+# Time $ITER sequential runs of the given binary; default to 100 runs.
+BIN="$1"
+: "${ITER:=100}"
+chmod +x "$BIN"
+
+time for _ in $(seq 1 "$ITER"); do
+ "$BIN";
+done
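The script above wall-clock-times repeated runs of a single binary given as the first argument. For reference, a rough Python equivalent for environments without bash; the CLI shape and the 100-run default are illustrative, not part of the tool:

import subprocess
import sys
import time


def benchmark(binary: str, iterations: int = 100) -> float:
    """Return wall-clock seconds for `iterations` sequential runs."""
    start = time.perf_counter()
    for _ in range(iterations):
        subprocess.run([binary], check=True)
    return time.perf_counter() - start


if __name__ == "__main__":
    print("%.2fs" % benchmark(sys.argv[1]))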
diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd
index 1e876905..e2196b03 100755
--- a/tools/build-on-freebsd
+++ b/tools/build-on-freebsd
@@ -23,7 +23,6 @@ pkgs="
bash
e2fsprogs
$py_prefix-Jinja2
- $py_prefix-boto
$py_prefix-configobj
$py_prefix-jsonpatch
$py_prefix-jsonpointer
diff --git a/tools/build-on-openbsd b/tools/build-on-openbsd
index ca028606..201844f1 100755
--- a/tools/build-on-openbsd
+++ b/tools/build-on-openbsd
@@ -13,7 +13,6 @@ pkgs="
py3-oauthlib
py3-requests
py3-setuptools
- py3-six
py3-yaml
sudo--
"
diff --git a/tools/ds-identify b/tools/ds-identify
index 794a96f4..b4e434c3 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -96,6 +96,7 @@ DI_BLKID_EXPORT_OUT=""
DI_GEOM_LABEL_STATUS_OUT=""
DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}"
DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}"
+DI_DMI_BOARD_NAME=""
DI_DMI_CHASSIS_ASSET_TAG=""
DI_DMI_PRODUCT_NAME=""
DI_DMI_SYS_VENDOR=""
@@ -460,6 +461,10 @@ is_container() {
esac
}
+is_socket_file() {
+ [ -S "$1" ] && return 0 || return 1
+}
+
read_kernel_cmdline() {
cached "${DI_KERNEL_CMDLINE}" && return
local cmdline="" fpath="${PATH_PROC_CMDLINE}"
@@ -477,6 +482,12 @@ read_kernel_cmdline() {
DI_KERNEL_CMDLINE="$cmdline"
}
+read_dmi_board_name() {
+ cached "${DI_DMI_BOARD_NAME}" && return
+ get_dmi_field board_name
+ DI_DMI_BOARD_NAME="$_RET"
+}
+
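read_dmi_board_name caches the board_name DMI field via get_dmi_field, mirroring the other read_dmi_* helpers. On Linux that field is normally exposed through sysfs; a hedged Python rendering of the lookup (path as commonly found, not taken from this patch):

from pathlib import Path


def read_dmi_board_name() -> str:
    """Sketch of the shell helper: DMI fields live under sysfs on Linux."""
    path = Path("/sys/class/dmi/id/board_name")
    try:
        return path.read_text().strip()
    except OSError:
        return ""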
read_dmi_chassis_asset_tag() {
cached "${DI_DMI_CHASSIS_ASSET_TAG}" && return
get_dmi_field chassis_asset_tag
@@ -806,7 +817,20 @@ dscheck_MAAS() {
# LXD datasource requires active /dev/lxd/sock
# https://linuxcontainers.org/lxd/docs/master/dev-lxd
dscheck_LXD() {
- [ -S /dev/lxd/sock ] && return ${DS_FOUND} || return ${DS_NOT_FOUND}
+ if is_socket_file /dev/lxd/sock; then
+ return ${DS_FOUND}
+ fi
+ # On LXD KVM instances, /dev/lxd/sock is not yet setup by
+ # lxd-agent-loader's systemd lxd-agent.service.
+ # Rely on DMI product information that is present on all LXD images.
+ # Note "qemu" is returned on kvm instances launched from a host kernel
+ # kernels >=5.10, due to `hv_passthrough` option.
+ # systemd v. 251 should properly return "kvm" in this scenario
+ # https://github.com/systemd/systemd/issues/22709
+ if [ "${DI_VIRT}" = "kvm" -o "${DI_VIRT}" = "qemu" ]; then
+ [ "${DI_DMI_BOARD_NAME}" = "LXD" ] && return ${DS_FOUND}
+ fi
+ return ${DS_NOT_FOUND}
}
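Putting the pieces together: dscheck_LXD now trusts the agent socket when present and falls back to DMI board-name matching on KVM/QEMU guests, where the socket may not exist yet. An illustrative Python version of the same decision order (paths and virt names as in the shell above):

import stat
from pathlib import Path


def is_lxd_datasource(virt: str) -> bool:
    """Same decision order as dscheck_LXD above (illustrative only)."""
    sock = Path("/dev/lxd/sock")
    if sock.exists() and stat.S_ISSOCK(sock.stat().st_mode):
        return True  # an active agent socket is authoritative
    if virt in ("kvm", "qemu"):
        # LXD VMs expose board_name "LXD" before the agent socket exists.
        board = Path("/sys/class/dmi/id/board_name")
        return board.exists() and board.read_text().strip() == "LXD"
    return False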
dscheck_NoCloud() {
@@ -827,6 +851,11 @@ dscheck_NoCloud() {
return ${DS_FOUND}
fi
+ # This is a bit hacky, but a NoCloud false positive isn't the end of the world
+ if check_config "NoCloud" && check_config "user-data" && check_config "meta-data"; then
+ return ${DS_FOUND}
+ fi
+
return ${DS_NOT_FOUND}
}
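check_config scans the system's cloud configuration for the given tokens, so this fallback only fires when NoCloud, user-data, and meta-data all appear there. A loose dict-based sketch of the intent; ds-identify itself works on raw config files, so this structured form is a simplification, and the key names are illustrative:

def nocloud_in_system_config(cfg: dict) -> bool:
    """Only report NoCloud when the config names it and carries seed keys."""
    ds_cfg = cfg.get("datasource", {}).get("NoCloud", {})
    return bool(ds_cfg) and {"user-data", "meta-data"} <= ds_cfg.keys()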
@@ -884,7 +913,7 @@ dscheck_DigitalOcean() {
dscheck_OpenNebula() {
check_seed_dir opennebula && return ${DS_FOUND}
- has_fs_with_label "CONTEXT" && return ${DS_FOUND}
+ has_fs_with_label "CONTEXT" "CDROM" && return ${DS_FOUND}
return ${DS_NOT_FOUND}
}
@@ -1466,6 +1495,7 @@ collect_info() {
read_config
read_datasource_list
read_dmi_sys_vendor
+ read_dmi_board_name
read_dmi_chassis_asset_tag
read_dmi_product_name
read_dmi_product_serial
@@ -1482,7 +1512,7 @@ _print_info() {
local n="" v="" vars=""
vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL"
vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG"
- vars="$vars FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT"
+ vars="$vars DMI_BOARD_NAME FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT"
vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION"
vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM"
vars="$vars DSNAME DSLIST"
diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github
index f1247cb3..4cfaf93c 100755
--- a/tools/migrate-lp-user-to-github
+++ b/tools/migrate-lp-user-to-github
@@ -9,8 +9,10 @@ import sys
try:
from launchpadlib.launchpad import Launchpad
except ImportError:
- print("Missing python launchpadlib dependency to create branches for you."
- "Install with: sudo apt-get install python3-launchpadlib" )
+ print(
+ "Missing python launchpadlib dependency to create branches for you."
+ "Install with: sudo apt-get install python3-launchpadlib"
+ )
sys.exit(1)
if "avoid-pep8-E402-import-not-top-of-file":
@@ -20,16 +22,16 @@ if "avoid-pep8-E402-import-not-top-of-file":
DRYRUN = False
-LP_TO_GIT_USER_FILE='.lp-to-git-user'
-MIGRATE_BRANCH_NAME='migrate-lp-to-github'
-GITHUB_PULL_URL='https://github.com/canonical/cloud-init/compare/master...{github_user}:{branch}'
-GH_UPSTREAM_URL='https://github.com/canonical/cloud-init'
+LP_TO_GIT_USER_FILE = ".lp-to-git-user"
+MIGRATE_BRANCH_NAME = "migrate-lp-to-github"
+GITHUB_PULL_URL = "https://github.com/canonical/cloud-init/compare/main...{github_user}:{branch}"
+GH_UPSTREAM_URL = "https://github.com/canonical/cloud-init"
def error(message):
if isinstance(message, bytes):
- message = message.decode('utf-8')
- log('ERROR: {error}'.format(error=message))
+ message = message.decode("utf-8")
+ log("ERROR: {error}".format(error=message))
sys.exit(1)
@@ -38,63 +40,85 @@ def log(message):
def subp(cmd, skip=False):
- prefix = 'SKIPPED: ' if skip else '$ '
- log('{prefix}{command}'.format(prefix=prefix, command=' '.join(cmd)))
+ prefix = "SKIPPED: " if skip else "$ "
+ log("{prefix}{command}".format(prefix=prefix, command=" ".join(cmd)))
if skip:
return
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
if proc.returncode:
error(err if err else out)
- return out.decode('utf-8')
+ return out.decode("utf-8")
-LP_GIT_PATH_TMPL = 'git+ssh://{launchpad_user}@git.launchpad.net/'
-LP_UPSTREAM_PATH_TMPL = LP_GIT_PATH_TMPL + 'cloud-init'
-LP_REMOTE_PATH_TMPL = LP_GIT_PATH_TMPL + '~{launchpad_user}/cloud-init'
-GITHUB_REMOTE_PATH_TMPL = 'git@github.com:{github_user}/cloud-init.git'
+LP_GIT_PATH_TMPL = "git+ssh://{launchpad_user}@git.launchpad.net/"
+LP_UPSTREAM_PATH_TMPL = LP_GIT_PATH_TMPL + "cloud-init"
+LP_REMOTE_PATH_TMPL = LP_GIT_PATH_TMPL + "~{launchpad_user}/cloud-init"
+GITHUB_REMOTE_PATH_TMPL = "git@github.com:{github_user}/cloud-init.git"
# Comment templates
-COMMIT_MSG_TMPL = '''\
+COMMIT_MSG_TMPL = """\
lp-to-git-users: adding {gh_username}
Mapped from {lp_username}
-'''
-PUBLISH_DIR='/tmp/cloud-init-lp-to-github-migration'
+"""
+PUBLISH_DIR = "/tmp/cloud-init-lp-to-github-migration"
+
def get_parser():
parser = ArgumentParser(description=__doc__)
parser.add_argument(
- '--dryrun', required=False, default=False, action='store_true',
- help=('Run commands and review operation in dryrun mode, '
- 'making not changes.'))
- parser.add_argument('launchpad_user', help='Your launchpad username.')
- parser.add_argument('github_user', help='Your github username.')
+ "--dryrun",
+ required=False,
+ default=False,
+ action="store_true",
+ help=(
+ "Run commands and review operation in dryrun mode, "
+ "making not changes."
+ ),
+ )
+ parser.add_argument("launchpad_user", help="Your launchpad username.")
+ parser.add_argument("github_user", help="Your github username.")
parser.add_argument(
- '--local-repo-dir', required=False, dest='repo_dir',
- help=('The name of the local directory into which we clone.'
- ' Default: {}'.format(PUBLISH_DIR)))
+ "--local-repo-dir",
+ required=False,
+ dest="repo_dir",
+ help=(
+ "The name of the local directory into which we clone."
+ " Default: {}".format(PUBLISH_DIR)
+ ),
+ )
parser.add_argument(
- '--upstream-branch', required=False, dest='upstream',
- default='origin/master',
- help=('The name of remote branch target into which we will merge.'
- ' Default: origin/master'))
+ "--upstream-branch",
+ required=False,
+ dest="upstream",
+ default="origin/main",
+ help=(
+ "The name of remote branch target into which we will merge."
+ " Default: origin/main"
+ ),
+ )
parser.add_argument(
- '-v', '--verbose', required=False, default=False, action='store_true',
- help=('Print all actions.'))
+ "-v",
+ "--verbose",
+ required=False,
+ default=False,
+ action="store_true",
+ help=("Print all actions."),
+ )
return parser
def create_publish_branch(upstream, publish_branch):
- '''Create clean publish branch target in the current git repo.'''
- branches = subp(['git', 'branch'])
- upstream_remote, upstream_branch = upstream.split('/', 1)
- subp(['git', 'checkout', upstream_branch])
- subp(['git', 'pull'])
+ """Create clean publish branch target in the current git repo."""
+ branches = subp(["git", "branch"])
+ upstream_remote, upstream_branch = upstream.split("/", 1)
+ subp(["git", "checkout", upstream_branch])
+ subp(["git", "pull"])
if publish_branch in branches:
- subp(['git', 'branch', '-D', publish_branch])
- subp(['git', 'checkout', upstream, '-b', publish_branch])
+ subp(["git", "branch", "-D", publish_branch])
+ subp(["git", "checkout", upstream, "-b", publish_branch])
def add_lp_and_github_remotes(lp_user, gh_user):
@@ -104,7 +128,7 @@ def add_lp_and_github_remotes(lp_user, gh_user):
"""
lp_remote = LP_REMOTE_PATH_TMPL.format(launchpad_user=lp_user)
gh_remote = GITHUB_REMOTE_PATH_TMPL.format(github_user=gh_user)
- remotes = subp(['git', 'remote', '-v'])
+ remotes = subp(["git", "remote", "-v"])
lp_remote_name = gh_remote_name = None
for remote in remotes.splitlines():
if not remote:
@@ -115,58 +139,70 @@ def add_lp_and_github_remotes(lp_user, gh_user):
elif gh_remote == remote_url:
gh_remote_name = remote_name
if not lp_remote_name:
- log("launchpad: Creating git remote launchpad-{} to point at your"
- " LP repo".format(lp_user))
- lp_remote_name = 'launchpad-{}'.format(lp_user)
- subp(['git', 'remote', 'add', lp_remote_name, lp_remote])
+ log(
+ "launchpad: Creating git remote launchpad-{} to point at your"
+ " LP repo".format(lp_user)
+ )
+ lp_remote_name = "launchpad-{}".format(lp_user)
+ subp(["git", "remote", "add", lp_remote_name, lp_remote])
try:
- subp(['git', 'fetch', lp_remote_name])
+ subp(["git", "fetch", lp_remote_name])
except:
log("launchpad: Pushing to ensure LP repo exists")
- subp(['git', 'push', lp_remote_name, 'master:master'])
- subp(['git', 'fetch', lp_remote_name])
+ subp(["git", "push", lp_remote_name, "main:main"])
+ subp(["git", "fetch", lp_remote_name])
if not gh_remote_name:
- log("github: Creating git remote github-{} to point at your"
- " GH repo".format(gh_user))
- gh_remote_name = 'github-{}'.format(gh_user)
- subp(['git', 'remote', 'add', gh_remote_name, gh_remote])
+ log(
+ "github: Creating git remote github-{} to point at your"
+ " GH repo".format(gh_user)
+ )
+ gh_remote_name = "github-{}".format(gh_user)
+ subp(["git", "remote", "add", gh_remote_name, gh_remote])
try:
- subp(['git', 'fetch', gh_remote_name])
+ subp(["git", "fetch", gh_remote_name])
except:
- log("ERROR: [github] Could not fetch remote '{remote}'."
+ log(
+ "ERROR: [github] Could not fetch remote '{remote}'."
"Please create a fork for your github user by clicking 'Fork'"
" from {gh_upstream}".format(
- remote=gh_remote, gh_upstream=GH_UPSTREAM_URL))
+ remote=gh_remote, gh_upstream=GH_UPSTREAM_URL
+ )
+ )
sys.exit(1)
return (lp_remote_name, gh_remote_name)
def create_migration_branch(
- branch_name, upstream, lp_user, gh_user, commit_msg):
+ branch_name, upstream, lp_user, gh_user, commit_msg
+):
"""Create an LP to Github migration branch and add lp_user->gh_user."""
- log("Creating a migration branch: {} adding your users".format(
- MIGRATE_BRANCH_NAME))
+ log(
+ "Creating a migration branch: {} adding your users".format(
+ MIGRATE_BRANCH_NAME
+ )
+ )
create_publish_branch(upstream, MIGRATE_BRANCH_NAME)
lp_to_git_map = {}
- lp_to_git_file = os.path.join(os.getcwd(), 'tools', LP_TO_GIT_USER_FILE)
+ lp_to_git_file = os.path.join(os.getcwd(), "tools", LP_TO_GIT_USER_FILE)
if os.path.exists(lp_to_git_file):
with open(lp_to_git_file) as stream:
lp_to_git_map = util.load_json(stream.read())
if gh_user in lp_to_git_map.values():
raise RuntimeError(
- "github user '{}' already in {}".format(gh_user, lp_to_git_file))
+ "github user '{}' already in {}".format(gh_user, lp_to_git_file)
+ )
if lp_user in lp_to_git_map:
raise RuntimeError(
- "launchpad user '{}' already in {}".format(
- lp_user, lp_to_git_file))
+ "launchpad user '{}' already in {}".format(lp_user, lp_to_git_file)
+ )
lp_to_git_map[lp_user] = gh_user
- with open(lp_to_git_file, 'w') as stream:
+ with open(lp_to_git_file, "w") as stream:
stream.write(util.json_dumps(lp_to_git_map))
- subp(['git', 'add', lp_to_git_file])
- commit_file = os.path.join(os.path.dirname(os.getcwd()), 'commit.msg')
- with open(commit_file, 'wb') as stream:
- stream.write(commit_msg.encode('utf-8'))
- subp(['git', 'commit', '--all', '-F', commit_file])
+ subp(["git", "add", lp_to_git_file])
+ commit_file = os.path.join(os.path.dirname(os.getcwd()), "commit.msg")
+ with open(commit_file, "wb") as stream:
+ stream.write(commit_msg.encode("utf-8"))
+ subp(["git", "commit", "--all", "-F", commit_file])
def main():
@@ -179,29 +215,42 @@ def main():
repo_dir = args.repo_dir or PUBLISH_DIR
if not os.path.exists(repo_dir):
cleanup_repo_dir = True
- subp(['git', 'clone',
- LP_UPSTREAM_PATH_TMPL.format(launchpad_user=args.launchpad_user),
- repo_dir])
+ subp(
+ [
+ "git",
+ "clone",
+ LP_UPSTREAM_PATH_TMPL.format(
+ launchpad_user=args.launchpad_user
+ ),
+ repo_dir,
+ ]
+ )
else:
cleanup_repo_dir = False
cwd = os.getcwd()
os.chdir(repo_dir)
- log("Syncing master branch with upstream")
- subp(['git', 'checkout', 'master'])
- subp(['git', 'pull'])
+ log("Syncing main branch with upstream")
+ subp(["git", "checkout", "main"])
+ subp(["git", "pull"])
try:
lp_remote_name, gh_remote_name = add_lp_and_github_remotes(
- args.launchpad_user, args.github_user)
+ args.launchpad_user, args.github_user
+ )
commit_msg = COMMIT_MSG_TMPL.format(
- gh_username=args.github_user, lp_username=args.launchpad_user)
+ gh_username=args.github_user, lp_username=args.launchpad_user
+ )
create_migration_branch(
- MIGRATE_BRANCH_NAME, args.upstream, args.launchpad_user,
- args.github_user, commit_msg)
+ MIGRATE_BRANCH_NAME,
+ args.upstream,
+ args.launchpad_user,
+ args.github_user,
+ commit_msg,
+ )
for push_remote in (lp_remote_name, gh_remote_name):
- subp(['git', 'push', push_remote, MIGRATE_BRANCH_NAME, '--force'])
+ subp(["git", "push", push_remote, MIGRATE_BRANCH_NAME, "--force"])
except Exception as e:
- error('Failed setting up migration branches: {0}'.format(e))
+ error("Failed setting up migration branches: {0}".format(e))
finally:
os.chdir(cwd)
if cleanup_repo_dir and os.path.exists(repo_dir):
@@ -209,35 +258,52 @@ def main():
# Make merge request on LP
log("[launchpad] Automatically creating merge proposal using launchpadlib")
lp = Launchpad.login_with(
- "server-team github-migration tool", 'production', version='devel')
- master = lp.git_repositories.getByPath(
- path='cloud-init').getRefByPath(path='master')
- LP_BRANCH_PATH='~{launchpad_user}/cloud-init/+git/cloud-init'
+ "server-team github-migration tool", "production", version="devel"
+ )
+ main = lp.git_repositories.getByPath(path="cloud-init").getRefByPath(
+ path="main"
+ )
+ LP_BRANCH_PATH = "~{launchpad_user}/cloud-init/+git/cloud-init"
lp_git_repo = lp.git_repositories.getByPath(
- path=LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user))
+ path=LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user)
+ )
lp_user_migrate_branch = lp_git_repo.getRefByPath(
- path='refs/heads/migrate-lp-to-github')
+ path="refs/heads/migrate-lp-to-github"
+ )
lp_merge_url = (
- 'https://code.launchpad.net/' +
- LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user) +
- '/+ref/' + MIGRATE_BRANCH_NAME)
+ "https://code.launchpad.net/"
+ + LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user)
+ + "/+ref/"
+ + MIGRATE_BRANCH_NAME
+ )
try:
lp_user_migrate_branch.createMergeProposal(
- commit_message=commit_msg, merge_target=master, needs_review=True)
+ commit_message=commit_msg, merge_target=main, needs_review=True
+ )
except Exception:
- log('[launchpad] active merge proposal already exists at:\n'
- '{url}\n'.format(url=lp_merge_url))
+ log(
+ "[launchpad] active merge proposal already exists at:\n"
+ "{url}\n".format(url=lp_merge_url)
+ )
else:
- log("[launchpad] Merge proposal created at:\n{url}.\n".format(
- url=lp_merge_url))
- log("To link your account to github open your browser and"
+ log(
+ "[launchpad] Merge proposal created at:\n{url}.\n".format(
+ url=lp_merge_url
+ )
+ )
+ log(
+ "To link your account to github open your browser and"
" click 'Create pull request' at the following URL:\n"
- "{url}".format(url=GITHUB_PULL_URL.format(
- github_user=args.github_user, branch=MIGRATE_BRANCH_NAME)))
+ "{url}".format(
+ url=GITHUB_PULL_URL.format(
+ github_user=args.github_user, branch=MIGRATE_BRANCH_NAME
+ )
+ )
+ )
if os.path.exists(repo_dir):
util.del_dir(repo_dir)
return 0
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/tools/run-centos b/tools/run-centos
deleted file mode 100755
index 4506b20d..00000000
--- a/tools/run-centos
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-# This file is part of cloud-init. See LICENSE file for license information.
-
-deprecated() {
-cat <<EOF
- ================ DEPRECATED ================
- | run-centos is deprecated. Please replace |
- | your usage with tools/run-container . |
- ================ DEPRECATED ================
-EOF
-}
-
-Usage() {
- deprecated
- cat <<EOF
-Usage: ${0##*/} [ options ] version
-
- This utility can makes it easier to run tests, build rpm and source rpm
- generation inside a LXC of the specified version of CentOS.
-
- version is major release number (6 or 7)
-
- options:
- -a | --artifact keep .rpm artifacts
- --dirty apply local changes before running tests.
- If not provided, a clean checkout of branch is tested.
- Inside container, changes are in local-changes.diff.
- -k | --keep keep container after tests
- -r | --rpm build .rpm
- -s | --srpm build .src.rpm
- -u | --unittest run unit tests
-
- Example:
- * ${0##*/} --rpm --srpm --unittest 6
-EOF
- deprecated
-EOF
-}
-
-bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; }
-
-main() {
- if [ "$1" = "-h" -o "$1" == "--help" ]; then
- Usage 1>&2;
- exit 0;
- fi
- local pt="" mydir=$(dirname "$0")
- local run_container="$mydir/run-container"
- if [ ! -x "$run_container" ]; then
- bad_Usage "Could not find run-container."
- return
- fi
-
- pt=( "$run_container" )
- while [ $# -ne 0 ]; do
- cur="${1:-}"; next="${2:-}";
- case "$cur" in
- -r|--rpm) cur="--package";;
- -s|--srpm) cur="--source-package";;
- -a|--artifact) cur="--artifacts=.";;
- 6|7) cur="centos/$cur";;
- esac
- pt[${#pt[@]}]="$cur"
- shift;
- done
- deprecated
- exec "${pt[@]}"
-}
-
-main "$@"
-
-# vi: ts=4 expandtab
diff --git a/tools/run-container b/tools/run-container
index e049dfdc..182db0e9 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -38,6 +38,7 @@ Usage: ${0##*/} [ options ] [images:]image-ref
-p | --package build a binary package (.deb or .rpm)
-s | --source-package build source package (debuild -S or srpm)
-u | --unittest run unit tests
+ --vm use a VM instead of a container
Example:
* ${0##*/} --package --source-package --unittest centos/6
@@ -50,7 +51,7 @@ cleanup() {
if [ "$KEEP" = "true" ]; then
error "not deleting container '$CONTAINER' due to --keep"
else
- delete_container "$CONTAINER"
+ delete_instance "$CONTAINER"
fi
fi
}
@@ -340,6 +341,12 @@ wait_inside() {
wait_for_boot() {
local name="$1"
local out="" ret="" wtime=$DEFAULT_WAIT_MAX
+ local system_up=false
+ for i in {0..30}; do
+ [ "$i" -gt 1 ] && sleep 5
+ inside "$name" true 2>/dev/null && system_up=true && break
+ done
+ [ "$system_up" = true ] || { errorrc "exec command inside $name failed."; return; }
get_os_info_in "$name"
[ "$OS_NAME" = "debian" ] && wtime=300 &&
debug 1 "on debian we wait for ${wtime}s"
@@ -360,10 +367,12 @@ wait_for_boot() {
fi
}
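The new loop polls a cheap probe command up to 30 times with 5-second pauses before trusting the instance is reachable. The generic shape as a small Python helper; the attempt and delay numbers mirror the shell, give or take its sleep-skipping guard on the first iterations:

import time
from typing import Callable


def wait_until(probe: Callable[[], bool], attempts: int = 30,
               delay: float = 5.0) -> bool:
    """Poll `probe`, sleeping between retries, failing after `attempts`."""
    for i in range(attempts):
        if i > 0:
            time.sleep(delay)
        if probe():
            return True
    return False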
-start_container() {
- local src="$1" name="$2"
+start_instance() {
+ local src="$1" name="$2" use_vm="$3"
debug 1 "starting container $name from '$src'"
- lxc launch "$src" "$name" || {
+ launch_flags=()
+ [ "$use_vm" == true ] && launch_flags+=(--vm)
+ lxc launch "$src" "$name" "${launch_flags[@]}" || {
errorrc "Failed to start container '$name' from '$src'";
return
}
@@ -371,7 +380,7 @@ start_container() {
wait_for_boot "$name"
}
-delete_container() {
+delete_instance() {
debug 1 "removing container $1 [--keep to keep]"
lxc delete --force "$1"
}
@@ -391,7 +400,7 @@ run_self_inside_as_cd() {
main() {
local short_opts="a:hknpsuv"
- local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose"
+ local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose,vm"
local getopt_out=""
getopt_out=$(getopt --name "${0##*/}" \
--options "${short_opts}" --long "${long_opts}" -- "$@") &&
@@ -401,6 +410,7 @@ main() {
local cur="" next=""
local package=false srcpackage=false unittest="" name=""
local dirty=false artifact_d="."
+ local use_vm=false
while [ $# -ne 0 ]; do
cur="${1:-}"; next="${2:-}";
@@ -414,6 +424,7 @@ main() {
-s|--source-package) srcpackage=true;;
-u|--unittest) unittest=1;;
-v|--verbose) VERBOSITY=$((VERBOSITY+1));;
+ --vm) use_vm=true;;
--) shift; break;;
esac
shift;
@@ -443,7 +454,7 @@ main() {
trap cleanup EXIT
- start_container "$img_ref" "$name" ||
+ start_instance "$img_ref" "$name" "$use_vm" ||
{ errorrc "Failed to start container for $img_ref"; return; }
get_os_info_in "$name" ||
diff --git a/tox.ini b/tox.ini
index c494cb94..e69c6e13 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,12 +10,14 @@ passenv=
PYTEST_ADDOPTS
[format_deps]
-black==21.12b0
-flake8==3.9.2
+black==22.3.0
+flake8==4.0.1
isort==5.10.1
-mypy==0.931
-pylint==2.11.1
-pytest==7.0.0
+mypy==0.950
+pylint==2.13.8
+pytest==7.0.1
+types-jsonschema==4.4.2
+types-oauthlib==3.1.6
types-PyYAML==6.0.4
types-requests==2.27.8
types-setuptools==57.4.9
@@ -23,14 +25,14 @@ types-setuptools==57.4.9
[testenv:flake8]
deps =
flake8=={[format_deps]flake8}
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py}
+commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ conftest.py setup.py}
[testenv:pylint]
deps =
pylint=={[format_deps]pylint}
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
-commands = {envpython} -m pylint {posargs:cloudinit tests tools}
+commands = {envpython} -m pylint {posargs:cloudinit/ tests/ tools/ conftest.py setup.py}
[testenv:black]
deps =
@@ -45,11 +47,13 @@ commands = {envpython} -m isort . --check-only
[testenv:mypy]
deps =
mypy=={[format_deps]mypy}
+ types-jsonschema=={[format_deps]types-jsonschema}
+ types-oauthlib=={[format_deps]types-oauthlib}
types-pyyaml=={[format_deps]types-PyYAML}
types-requests=={[format_deps]types-requests}
types-setuptools=={[format_deps]types-setuptools}
pytest=={[format_deps]pytest}
-commands = {envpython} -m mypy .
+commands = {envpython} -m mypy cloudinit/ tests/ tools/
[testenv:check_format]
deps =
@@ -59,6 +63,11 @@ deps =
mypy=={[format_deps]mypy}
pylint=={[format_deps]pylint}
pytest=={[format_deps]pytest}
+ types-jsonschema=={[format_deps]types-jsonschema}
+ types-oauthlib=={[format_deps]types-oauthlib}
+ types-pyyaml=={[format_deps]types-PyYAML}
+ types-requests=={[format_deps]types-requests}
+ types-setuptools=={[format_deps]types-setuptools}
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
commands =
@@ -108,8 +117,10 @@ deps =
# test-requirements
pytest==3.3.2
pytest-cov==2.5.1
+ pytest-mock==1.7.1
# Needed by pytest and default causes failures
attrs==17.4.0
+ responses==0.5.1
[testenv:lowest-supported]
# This definition will run on bionic with the version of httpretty
@@ -132,24 +143,26 @@ commands = {[testenv:py3]commands}
deps =
-r{toxinidir}/doc-requirements.txt
commands =
- {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
+ {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html}
doc8 doc/rtd
[testenv:tip-flake8]
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py}
+commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ conftest.py setup.py}
deps = flake8
[testenv:tip-mypy]
-commands = {envpython} -m mypy --install-types --non-interactive .
+commands = {envpython} -m mypy --install-types --non-interactive cloudinit/ tests/ tools/
deps =
mypy
pytest
+ types-jsonschema
+ types-oauthlib
types-PyYAML
types-requests
types-setuptools
[testenv:tip-pylint]
-commands = {envpython} -m pylint {posargs:cloudinit tests tools}
+commands = {envpython} -m pylint {posargs:cloudinit/ tests/ tools/ conftest.py setup.py}
deps =
# requirements
pylint
@@ -158,7 +171,7 @@ deps =
-r{toxinidir}/integration-requirements.txt
[testenv:integration-tests]
-commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
+commands = {envpython} -m pytest --log-cli-level=INFO -vv {posargs:tests/integration_tests}
deps =
-r{toxinidir}/integration-requirements.txt
passenv = CLOUD_INIT_* PYCLOUDLIB_* SSH_AUTH_SOCK OS_*
diff --git a/upstart/cloud-config.conf b/upstart/cloud-config.conf
deleted file mode 100644
index 2c3ef67b..00000000
--- a/upstart/cloud-config.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# cloud-config - Handle applying the settings specified in cloud-config
-description "Handle applying cloud-config"
-emits cloud-config
-
-start on (filesystem and started rsyslog)
-console output
-task
-
-exec cloud-init modules --mode=config
diff --git a/upstart/cloud-final.conf b/upstart/cloud-final.conf
deleted file mode 100644
index 72ae5052..00000000
--- a/upstart/cloud-final.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# cloud-final.conf - run "final" jobs
-# this runs around traditional "rc.local" time.
-# and after all cloud-config jobs are run
-description "execute cloud user/final scripts"
-
-start on (stopped rc RUNLEVEL=[2345] and stopped cloud-config)
-console output
-task
-
-exec cloud-init modules --mode=final
diff --git a/upstart/cloud-init-blocknet.conf b/upstart/cloud-init-blocknet.conf
deleted file mode 100644
index be09e7d8..00000000
--- a/upstart/cloud-init-blocknet.conf
+++ /dev/null
@@ -1,83 +0,0 @@
-# cloud-init-blocknet
-# the purpose of this job is
-# * to block networking from coming up until cloud-init-nonet has run
-# * timeout if they all do not come up in a reasonable amount of time
-description "block networking until cloud-init-local"
-start on (starting network-interface
- or starting network-manager
- or starting networking)
-stop on stopped cloud-init-local
-
-instance $JOB${INTERFACE:+/}${INTERFACE:-}
-export INTERFACE
-task
-
-script
- set +e # you cannot trap TERM reliably with 'set -e'
- SLEEP_CHILD=""
-
- static_network_up() {
- local emitted="/run/network/static-network-up-emitted"
- # /run/network/static-network-up-emitted is written by
- # upstart (via /etc/network/if-up.d/upstart). its presense would
- # indicate that static-network-up has already fired.
- [ -e "$emitted" -o -e "/var/$emitted" ]
- }
- msg() {
- local uptime="" idle="" msg=""
- if [ -r /proc/uptime ]; then
- read uptime idle < /proc/uptime
- fi
- msg="${UPSTART_INSTANCE}${uptime:+[${uptime}]}: $*"
- echo "$msg"
- }
-
- handle_sigterm() {
- # if we received sigterm and static networking is up then it probably
- # came from upstart as a result of 'stop on static-network-up'
- msg "got sigterm"
- if [ -n "$SLEEP_CHILD" ]; then
- if ! kill $SLEEP_CHILD 2>/dev/null; then
- [ ! -d "/proc/$SLEEP_CHILD" ] ||
- msg "hm.. failed to kill sleep pid $SLEEP_CHILD"
- fi
- fi
- msg "stopped"
- exit 0
- }
-
- dowait() {
- msg "blocking $1 seconds"
- # all this 'exec -a' does is get me a nicely named process in 'ps'
- # ie, 'sleep-block-network-interface.eth1'
- if [ -x /bin/bash ]; then
- bash -c 'exec -a sleep-block-$1 sleep $2' -- "$UPSTART_INSTANCE" "$1" &
- else
- sleep "$1" &
- fi
- SLEEP_CHILD=$!
- msg "sleepchild=$SLEEP_CHILD"
- wait $SLEEP_CHILD
- SLEEP_CHILD=""
- }
-
- trap handle_sigterm TERM
-
- if [ -n "$INTERFACE" -a "${INTERFACE#lo}" != "${INTERFACE}" ]; then
- msg "ignoring interface ${INTERFACE}";
- exit 0;
- fi
-
- # static_network_up already occurred
- static_network_up && { msg "static_network_up already"; exit 0; }
-
- # local-finished cloud-init-local success or failure
- lfin="/run/cloud-init/local-finished"
- disable="/etc/cloud/no-blocknet"
- [ -f "$lfin" ] && { msg "$lfin found"; exit 0; }
- [ -f "$disable" ] && { msg "$disable found"; exit 0; }
-
- dowait 120
- msg "gave up waiting for $lfin"
- exit 1
-end script
diff --git a/upstart/cloud-init-container.conf b/upstart/cloud-init-container.conf
deleted file mode 100644
index 6bdbe77e..00000000
--- a/upstart/cloud-init-container.conf
+++ /dev/null
@@ -1,57 +0,0 @@
-# in a lxc container, events for network interfaces do not
-# get created or may be missed. This helps cloud-init-nonet along
-# by emitting those events if they have not been emitted.
-
-start on container
-stop on static-network-up
-task
-
-emits net-device-added
-
-console output
-
-script
- # if we are inside a container, then we may have to emit the ifup
- # events for 'auto' network devices.
- set -f
-
- # from /etc/network/if-up.d/upstart
- MARK_DEV_PREFIX="/run/network/ifup."
- MARK_STATIC_NETWORK_EMITTED="/run/network/static-network-up-emitted"
- # if the all static network interfaces are already up, nothing to do
- [ -f "$MARK_STATIC_NETWORK_EMITTED" ] && exit 0
-
- # ifquery will exit failure if there is no /run/network directory.
- # normally that would get created by one of network-interface.conf
- # or networking.conf. But, it is possible that we're running
- # before either of those have.
- mkdir -p /run/network
-
- # get list of all 'auto' interfaces. if there are none, nothing to do.
- auto_list=$(ifquery --list --allow auto 2>/dev/null) || :
- [ -z "$auto_list" ] && exit 0
- set -- ${auto_list}
- [ "$*" = "lo" ] && exit 0
-
- # we only want to emit for interfaces that do not exist, so filter
- # out anything that does not exist.
- for iface in "$@"; do
- [ "$iface" = "lo" ] && continue
- # skip interfaces that are already up
- [ -f "${MARK_DEV_PREFIX}${iface}" ] && continue
-
- if [ -d /sys/net ]; then
- # if /sys is mounted, and there is no /sys/net/iface, then no device
- [ -e "/sys/net/$iface" ] && continue
- else
- # sys wasn't mounted, so just check via 'ifconfig'
- ifconfig "$iface" >/dev/null 2>&1 || continue
- fi
- initctl emit --no-wait net-device-added "INTERFACE=$iface" &&
- emitted="$emitted $iface" ||
- echo "warn: ${UPSTART_JOB} failed to emit net-device-added INTERFACE=$iface"
- done
-
- [ -z "${emitted# }" ] ||
- echo "${UPSTART_JOB}: emitted ifup for ${emitted# }"
-end script
diff --git a/upstart/cloud-init-local.conf b/upstart/cloud-init-local.conf
deleted file mode 100644
index 5def043d..00000000
--- a/upstart/cloud-init-local.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-# cloud-init - the initial cloud-init job
-# crawls metadata service, emits cloud-config
-start on mounted MOUNTPOINT=/ and mounted MOUNTPOINT=/run
-
-task
-
-console output
-
-script
- lfin=/run/cloud-init/local-finished
- ret=0
- cloud-init init --local || ret=$?
- [ -r /proc/uptime ] && read up idle < /proc/uptime || up="N/A"
- echo "$ret up $up" > "$lfin"
- exit $ret
-end script
diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf
deleted file mode 100644
index 6abf6573..00000000
--- a/upstart/cloud-init-nonet.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# cloud-init-no-net
-# the purpose of this job is
-# * to block running of cloud-init until all network interfaces
-# configured in /etc/network/interfaces are up
-# * timeout if they all do not come up in a reasonable amount of time
-start on mounted MOUNTPOINT=/ and stopped cloud-init-local
-stop on static-network-up
-task
-
-console output
-
-script
- set +e # you cannot trap TERM reliably with 'set -e'
- SLEEP_CHILD=""
-
- static_network_up() {
- local emitted="/run/network/static-network-up-emitted"
- # /run/network/static-network-up-emitted is written by
- # upstart (via /etc/network/if-up.d/upstart). its presense would
- # indicate that static-network-up has already fired.
- [ -e "$emitted" -o -e "/var/$emitted" ]
- }
- msg() {
- local uptime="" idle=""
- if [ -r /proc/uptime ]; then
- read uptime idle < /proc/uptime
- fi
- echo "$UPSTART_JOB${uptime:+[${uptime}]}:" "$1"
- }
-
- handle_sigterm() {
- # if we received sigterm and static networking is up then it probably
- # came from upstart as a result of 'stop on static-network-up'
- if [ -n "$SLEEP_CHILD" ]; then
- if ! kill $SLEEP_CHILD 2>/dev/null; then
- [ ! -d "/proc/$SLEEP_CHILD" ] ||
- msg "hm.. failed to kill sleep pid $SLEEP_CHILD"
- fi
- fi
- if static_network_up; then
- msg "static networking is now up"
- exit 0
- fi
- msg "recieved SIGTERM, networking not up"
- exit 2
- }
-
- dowait() {
- [ $# -eq 2 ] || msg "waiting $1 seconds for network device"
- sleep "$1" &
- SLEEP_CHILD=$!
- wait $SLEEP_CHILD
- SLEEP_CHILD=""
- }
-
- trap handle_sigterm TERM
-
- # static_network_up already occurred
- static_network_up && exit 0
-
- dowait 5 silent
- dowait 10
- dowait 115
- msg "gave up waiting for a network device."
- : > /var/lib/cloud/data/no-net
-end script
diff --git a/upstart/cloud-init.conf b/upstart/cloud-init.conf
deleted file mode 100644
index 41ddd284..00000000
--- a/upstart/cloud-init.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# cloud-init - the initial cloud-init job
-# crawls metadata service, emits cloud-config
-start on mounted MOUNTPOINT=/ and stopped cloud-init-nonet
-
-task
-
-console output
-
-exec /usr/bin/cloud-init init
diff --git a/upstart/cloud-log-shutdown.conf b/upstart/cloud-log-shutdown.conf
deleted file mode 100644
index 278b9c06..00000000
--- a/upstart/cloud-log-shutdown.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-# log shutdowns and reboots to the console (/dev/console)
-# this is useful for correlating logs
-start on runlevel PREVLEVEL=2
-
-task
-console output
-
-script
- # runlevel(7) says INIT_HALT will be set to HALT or POWEROFF
- date=$(date --utc)
- case "$RUNLEVEL:$INIT_HALT" in
- 6:*) mode="reboot";;
- 0:HALT) mode="halt";;
- 0:POWEROFF) mode="poweroff";;
- 0:*) mode="shutdown-unknown";;
- esac
- { read seconds idle < /proc/uptime; } 2>/dev/null || :
- echo "$date: shutting down for $mode${seconds:+ [up ${seconds%.*}s]}."
-end script