author     James Falcon <james.falcon@canonical.com>  2022-11-15 14:23:44 -0600
committer  git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>  2022-11-15 22:10:13 +0000
commit     1b82290fc3b715282ea573a855608a6632841274 (patch)
tree       17133e8d9303e410410aef5022a58d158fc42389
parent     bb2229803764a77f8b45a533f71f8e9742585bf2 (diff)
download   cloud-init-git-1b82290fc3b715282ea573a855608a6632841274.tar.gz
22.4-0ubuntu1 (patches unapplied)
Imported using git-ubuntu import.
-rw-r--r--  .github/workflows/check_format.yml  | 13
-rw-r--r--  .readthedocs.yaml  | 8
-rw-r--r--  .travis.yml  | 3
-rw-r--r--  ChangeLog  | 119
-rw-r--r--  Makefile  | 8
-rw-r--r--  README.md  | 6
-rw-r--r--  cloudinit/analyze/__main__.py  | 3
-rw-r--r--  cloudinit/analyze/show.py  | 7
-rw-r--r--  cloudinit/apport.py  | 54
-rw-r--r--  cloudinit/cloud.py  | 2
-rw-r--r--[-rwxr-xr-x]  cloudinit/cmd/cloud_id.py  | 29
-rwxr-xr-x  cloudinit/cmd/devel/hotplug_hook.py  | 7
-rwxr-xr-x  cloudinit/cmd/devel/logs.py  | 89
-rwxr-xr-x  cloudinit/cmd/devel/make_mime.py  | 3
-rw-r--r--  cloudinit/cmd/devel/parser.py  | 2
-rwxr-xr-x  cloudinit/cmd/devel/render.py  | 47
-rwxr-xr-x  cloudinit/cmd/main.py  | 16
-rwxr-xr-x  cloudinit/cmd/query.py  | 21
-rw-r--r--[-rwxr-xr-x]  cloudinit/cmd/status.py  | 156
-rw-r--r--  cloudinit/config/__init__.py  | 1
-rw-r--r--  cloudinit/config/cc_ansible.py  | 184
-rw-r--r--  cloudinit/config/cc_apk_configure.py  | 7
-rw-r--r--  cloudinit/config/cc_apt_configure.py  | 33
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py  | 7
-rw-r--r--  cloudinit/config/cc_bootcmd.py  | 7
-rw-r--r--  cloudinit/config/cc_byobu.py  | 8
-rw-r--r--  cloudinit/config/cc_ca_certs.py  | 7
-rw-r--r--  cloudinit/config/cc_chef.py  | 41
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py  | 7
-rw-r--r--  cloudinit/config/cc_disk_setup.py  | 26
-rw-r--r--  cloudinit/config/cc_fan.py  | 7
-rw-r--r--  cloudinit/config/cc_final_message.py  | 14
-rw-r--r--  cloudinit/config/cc_growpart.py  | 40
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py  | 14
-rw-r--r--  cloudinit/config/cc_install_hotplug.py  | 7
-rw-r--r--  cloudinit/config/cc_keyboard.py  | 7
-rw-r--r--  cloudinit/config/cc_keys_to_console.py  | 7
-rw-r--r--  cloudinit/config/cc_landscape.py  | 7
-rw-r--r--  cloudinit/config/cc_locale.py  | 7
-rw-r--r--  cloudinit/config/cc_lxd.py  | 188
-rw-r--r--  cloudinit/config/cc_mcollective.py  | 7
-rw-r--r--  cloudinit/config/cc_migrator.py  | 7
-rw-r--r--  cloudinit/config/cc_mounts.py  | 9
-rw-r--r--  cloudinit/config/cc_ntp.py  | 77
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py  | 7
-rw-r--r--  cloudinit/config/cc_phone_home.py  | 9
-rw-r--r--  cloudinit/config/cc_power_state_change.py  | 7
-rw-r--r--  cloudinit/config/cc_puppet.py  | 26
-rw-r--r--  cloudinit/config/cc_refresh_rmc_and_interface.py  | 7
-rw-r--r--  cloudinit/config/cc_reset_rmc.py  | 7
-rw-r--r--  cloudinit/config/cc_resizefs.py  | 7
-rw-r--r--  cloudinit/config/cc_resolv_conf.py  | 17
-rw-r--r--  cloudinit/config/cc_rh_subscription.py  | 9
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py  | 13
-rw-r--r--  cloudinit/config/cc_rsyslog.py  | 9
-rw-r--r--  cloudinit/config/cc_runcmd.py  | 7
-rw-r--r--  cloudinit/config/cc_salt_minion.py  | 9
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py  | 7
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py  | 7
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py  | 7
-rw-r--r--  cloudinit/config/cc_scripts_user.py  | 7
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py  | 7
-rw-r--r--  cloudinit/config/cc_seed_random.py  | 7
-rw-r--r--  cloudinit/config/cc_set_hostname.py  | 7
-rw-r--r--  cloudinit/config/cc_set_passwords.py  | 5
-rw-r--r--  cloudinit/config/cc_snap.py  | 7
-rw-r--r--  cloudinit/config/cc_spacewalk.py  | 7
-rw-r--r--  cloudinit/config/cc_ssh.py  | 5
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py  | 7
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py  | 9
-rw-r--r--  cloudinit/config/cc_timezone.py  | 8
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py  | 430
-rw-r--r--  cloudinit/config/cc_ubuntu_autoinstall.py  | 7
-rw-r--r--  cloudinit/config/cc_ubuntu_drivers.py  | 18
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py  | 7
-rw-r--r--  cloudinit/config/cc_update_hostname.py  | 7
-rw-r--r--  cloudinit/config/cc_users_groups.py  | 9
-rw-r--r--  cloudinit/config/cc_wireguard.py  | 6
-rw-r--r--  cloudinit/config/cc_write_files.py  | 6
-rw-r--r--  cloudinit/config/cc_write_files_deferred.py  | 7
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py  | 8
-rw-r--r--  cloudinit/config/cc_zypper_add_repo.py  | 7
-rw-r--r--  cloudinit/config/modules.py  | 8
-rw-r--r--  cloudinit/config/schemas/schema-cloud-config-v1.json  | 1493
-rw-r--r--  cloudinit/config/schemas/versions.schema.cloud-config.json  | 8
-rw-r--r--  cloudinit/distros/__init__.py  | 53
-rw-r--r--  cloudinit/distros/alpine.py  | 32
-rw-r--r--  cloudinit/distros/bsd.py  | 3
-rw-r--r--  cloudinit/distros/cos.py  | 12
-rw-r--r--  cloudinit/distros/freebsd.py  | 27
-rw-r--r--  cloudinit/distros/mariner.py  | 55
-rw-r--r--  cloudinit/distros/networking.py  | 57
-rw-r--r--  cloudinit/distros/openbsd.py  | 28
-rw-r--r--  cloudinit/distros/parsers/hostname.py  | 2
-rw-r--r--  cloudinit/distros/parsers/hosts.py  | 2
-rw-r--r--  cloudinit/distros/parsers/ifconfig.py  | 277
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py  | 2
-rw-r--r--  cloudinit/filters/launch_index.py  | 2
-rw-r--r--  cloudinit/handlers/jinja_template.py  | 44
-rw-r--r--  cloudinit/helpers.py  | 71
-rw-r--r--  cloudinit/mergers/__init__.py  | 2
-rw-r--r--  cloudinit/mergers/m_dict.py  | 2
-rw-r--r--  cloudinit/mergers/m_list.py  | 2
-rw-r--r--  cloudinit/mergers/m_str.py  | 2
-rw-r--r--  cloudinit/net/bsd.py  | 3
-rw-r--r--  cloudinit/net/cmdline.py  | 3
-rw-r--r--  cloudinit/net/dhcp.py  | 42
-rw-r--r--  cloudinit/net/eni.py  | 4
-rw-r--r--  cloudinit/net/ephemeral.py  | 23
-rw-r--r--  cloudinit/net/netplan.py  | 5
-rw-r--r--  cloudinit/net/network_manager.py  | 4
-rw-r--r--  cloudinit/net/network_state.py  | 2
-rw-r--r--  cloudinit/net/networkd.py  | 73
-rw-r--r--  cloudinit/net/renderer.py  | 2
-rw-r--r--  cloudinit/net/renderers.py  | 2
-rw-r--r--  cloudinit/net/sysconfig.py  | 5
-rw-r--r--  cloudinit/registry.py  | 2
-rw-r--r--  cloudinit/reporting/events.py  | 10
-rw-r--r--  cloudinit/reporting/handlers.py  | 9
-rw-r--r--  cloudinit/settings.py  | 1
-rw-r--r--  cloudinit/simpletable.py  | 2
-rw-r--r--  cloudinit/sources/DataSourceAzure.py  | 12
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py  | 2
-rw-r--r--  cloudinit/sources/DataSourceEc2.py  | 35
-rw-r--r--  cloudinit/sources/DataSourceGCE.py  | 7
-rw-r--r--  cloudinit/sources/DataSourceHetzner.py  | 1
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py  | 2
-rw-r--r--  cloudinit/sources/DataSourceLXD.py  | 333
-rw-r--r--  cloudinit/sources/DataSourceNWCS.py  | 168
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py  | 2
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py  | 12
-rw-r--r--  cloudinit/sources/DataSourceOracle.py  | 48
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py  | 5
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py  | 2
-rw-r--r--  cloudinit/sources/DataSourceUpCloud.py  | 4
-rw-r--r--  cloudinit/sources/DataSourceVultr.py  | 1
-rw-r--r--  cloudinit/sources/__init__.py  | 20
-rw-r--r--  cloudinit/sources/helpers/cloudsigma.py  | 4
-rw-r--r--  cloudinit/sources/helpers/ec2.py  | 19
-rw-r--r--  cloudinit/sources/helpers/openstack.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/boot_proto.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py  | 4
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py  | 4
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_event.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_state.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py  | 9
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/ipv4_mode.py  | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic.py  | 8
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic_base.py  | 6
-rw-r--r--  cloudinit/sources/helpers/vultr.py  | 6
-rw-r--r--  cloudinit/ssh_util.py  | 6
-rw-r--r--  cloudinit/stages.py  | 25
-rw-r--r--  cloudinit/temp_utils.py  | 66
-rw-r--r--  cloudinit/url_helper.py  | 6
-rw-r--r--  cloudinit/user_data.py  | 2
-rw-r--r--  cloudinit/util.py  | 100
-rw-r--r--  cloudinit/version.py  | 2
-rw-r--r--  config/cloud.cfg.tmpl  | 54
-rw-r--r--  conftest.py  | 25
-rw-r--r--  debian/changelog  | 12
-rw-r--r--  debian/cloud-init.templates  | 6
-rw-r--r--  debian/control  | 1
-rw-r--r--  debian/po/templates.pot  | 6
-rw-r--r--  doc-requirements.txt  | 13
-rw-r--r--  doc/examples/cloud-config-ansible-controller.txt  | 140
-rw-r--r--  doc/examples/cloud-config-ansible-managed.txt  | 64
-rw-r--r--  doc/examples/cloud-config-ansible-pull.txt (renamed from doc/examples/cloud-config-ansible.txt)  | 6
-rw-r--r--  doc/examples/cloud-config-user-groups.txt  | 2
-rw-r--r--  doc/examples/cloud-config.txt  | 12
-rw-r--r--  doc/examples/include.txt  | 8
-rw-r--r--  doc/rtd/conf.py  | 13
-rw-r--r--  doc/rtd/index.rst  | 54
-rw-r--r--  doc/rtd/static/logo-dark-mode.png  | bin 0 -> 17483 bytes
-rw-r--r--  doc/rtd/topics/availability.rst  | 1
-rw-r--r--  doc/rtd/topics/base_config_reference.rst  | 363
-rw-r--r--  doc/rtd/topics/boot.rst  | 2
-rw-r--r--  doc/rtd/topics/cli.rst  | 20
-rw-r--r--  doc/rtd/topics/configuration.rst  | 79
-rw-r--r--  doc/rtd/topics/datasources.rst  | 1
-rw-r--r--  doc/rtd/topics/datasources/exoscale.rst  | 5
-rw-r--r--  doc/rtd/topics/datasources/lxd.rst  | 33
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst  | 4
-rw-r--r--  doc/rtd/topics/datasources/nwcs.rst  | 30
-rw-r--r--  doc/rtd/topics/datasources/openstack.rst  | 6
-rw-r--r--  doc/rtd/topics/datasources/smartos.rst  | 4
-rw-r--r--  doc/rtd/topics/examples.rst  | 20
-rw-r--r--  doc/rtd/topics/faq.rst  | 22
-rw-r--r--  doc/rtd/topics/instancedata.rst  | 378
-rw-r--r--  doc/rtd/topics/logging.rst  | 6
-rw-r--r--  doc/rtd/topics/merging.rst  | 2
-rw-r--r--  doc/rtd/topics/module_creation.rst  | 9
-rw-r--r--  doc/rtd/topics/modules.rst  | 1
-rw-r--r--  doc/rtd/topics/network-config-format-v1.rst  | 3
-rw-r--r--  doc/rtd/topics/network-config-format-v2.rst  | 48
-rw-r--r--  doc/rtd/topics/network-config.rst  | 2
-rw-r--r--  doc/rtd/topics/vendordata.rst  | 2
-rw-r--r--  integration-requirements.txt  | 2
-rw-r--r--  packages/README.md  | 10
-rw-r--r--  pyproject.toml  | 7
-rw-r--r--  templates/chrony.conf.cos.tmpl  | 26
-rw-r--r--  templates/chrony.conf.freebsd.tmpl  | 347
-rw-r--r--  templates/host.mariner.tmpl  | 22
-rw-r--r--  templates/ntp.conf.freebsd.tmpl  | 114
-rw-r--r--  templates/ntpd.conf.openbsd.tmpl  | 19
-rw-r--r--  test-requirements.txt  | 1
-rw-r--r--  tests/data/netinfo/freebsd-ifconfig-output  | 2
-rw-r--r--  tests/data/netinfo/openbsd-ifconfig-output  | 29
-rw-r--r--  tests/data/old_pickles/focal-azure-20.1-10-g71af48df-0ubuntu5.pkl  | bin 0 -> 8948 bytes
-rw-r--r--  tests/integration_tests/assets/echo_server.py  | 37
-rw-r--r--  tests/integration_tests/assets/echo_server.service  | 10
-rw-r--r--  tests/integration_tests/clouds.py  | 11
-rw-r--r--  tests/integration_tests/datasources/test_lxd_discovery.py  | 19
-rw-r--r--  tests/integration_tests/datasources/test_lxd_hotplug.py  | 154
-rw-r--r--  tests/integration_tests/datasources/test_tmp_noexec.py  | 32
-rw-r--r--  tests/integration_tests/instances.py  | 2
-rw-r--r--  tests/integration_tests/modules/test_ansible.py  | 201
-rw-r--r--  tests/integration_tests/modules/test_apt.py  | 44
-rw-r--r--  tests/integration_tests/modules/test_combined.py  | 25
-rw-r--r--  tests/integration_tests/modules/test_jinja_templating.py  | 102
-rw-r--r--  tests/integration_tests/modules/test_lxd.py  | 220
-rw-r--r--  tests/integration_tests/modules/test_ubuntu_advantage.py  | 223
-rw-r--r--  tests/integration_tests/reporting/test_webhook_reporting.py  | 66
-rw-r--r--  tests/integration_tests/test_paths.py  | 56
-rw-r--r--  tests/integration_tests/test_upgrade.py  | 9
-rw-r--r--  tests/integration_tests/util.py  | 17
-rw-r--r--  tests/unittests/analyze/test_boot.py  | 68
-rw-r--r--  tests/unittests/analyze/test_dump.py  | 58
-rw-r--r--  tests/unittests/cmd/devel/test_hotplug_hook.py  | 18
-rw-r--r--  tests/unittests/cmd/devel/test_logs.py  | 101
-rw-r--r--  tests/unittests/cmd/devel/test_render.py  | 137
-rw-r--r--  tests/unittests/cmd/test_clean.py  | 2
-rw-r--r--  tests/unittests/cmd/test_cloud_id.py  | 72
-rw-r--r--  tests/unittests/cmd/test_query.py  | 48
-rw-r--r--  tests/unittests/cmd/test_status.py  | 296
-rw-r--r--  tests/unittests/config/test_apt_source_v1.py  | 2
-rw-r--r--  tests/unittests/config/test_cc_ansible.py  | 279
-rw-r--r--  tests/unittests/config/test_cc_bootcmd.py  | 2
-rw-r--r--  tests/unittests/config/test_cc_chef.py  | 74
-rw-r--r--  tests/unittests/config/test_cc_disk_setup.py  | 4
-rw-r--r--  tests/unittests/config/test_cc_growpart.py  | 30
-rw-r--r--  tests/unittests/config/test_cc_lxd.py  | 168
-rw-r--r--  tests/unittests/config/test_cc_ntp.py  | 51
-rw-r--r--  tests/unittests/config/test_cc_power_state_change.py  | 16
-rw-r--r--  tests/unittests/config/test_cc_puppet.py  | 175
-rw-r--r--  tests/unittests/config/test_cc_resizefs.py  | 10
-rw-r--r--  tests/unittests/config/test_cc_set_passwords.py  | 6
-rw-r--r--  tests/unittests/config/test_cc_ubuntu_advantage.py  | 1129
-rw-r--r--  tests/unittests/config/test_cc_ubuntu_drivers.py  | 10
-rw-r--r--  tests/unittests/config/test_cc_users_groups.py  | 10
-rw-r--r--  tests/unittests/config/test_cc_wireguard.py  | 2
-rw-r--r--  tests/unittests/config/test_schema.py  | 1
-rw-r--r--  tests/unittests/distros/test__init__.py (renamed from tests/unittests/distros/test_generic.py)  | 27
-rw-r--r--  tests/unittests/distros/test_arch.py  | 3
-rw-r--r--  tests/unittests/distros/test_gentoo.py  | 3
-rw-r--r--  tests/unittests/distros/test_ifconfig.py  | 72
-rw-r--r--  tests/unittests/distros/test_manage_service.py  | 44
-rw-r--r--  tests/unittests/distros/test_mariner.py  | 25
-rw-r--r--  tests/unittests/distros/test_netbsd.py  | 6
-rw-r--r--  tests/unittests/distros/test_netconfig.py  | 140
-rw-r--r--  tests/unittests/distros/test_networking.py  | 65
-rw-r--r--  tests/unittests/distros/test_opensuse.py  | 3
-rw-r--r--  tests/unittests/distros/test_photon.py  | 3
-rw-r--r--  tests/unittests/distros/test_sles.py  | 3
-rw-r--r--  tests/unittests/helpers.py  | 38
-rw-r--r--  tests/unittests/net/test_dhcp.py  | 205
-rw-r--r--  tests/unittests/net/test_ephemeral.py  | 10
-rw-r--r--  tests/unittests/net/test_init.py  | 10
-rw-r--r--  tests/unittests/net/test_networkd.py  | 236
-rw-r--r--  tests/unittests/sources/helpers/test_ec2.py  | 190
-rw-r--r--  tests/unittests/sources/test_aliyun.py  | 61
-rw-r--r--  tests/unittests/sources/test_azure.py  | 150
-rw-r--r--  tests/unittests/sources/test_azure_helper.py  | 3
-rw-r--r--  tests/unittests/sources/test_bigstep.py  | 6
-rw-r--r--  tests/unittests/sources/test_common.py  | 2
-rw-r--r--  tests/unittests/sources/test_ec2.py  | 109
-rw-r--r--  tests/unittests/sources/test_exoscale.py  | 93
-rw-r--r--  tests/unittests/sources/test_gce.py  | 120
-rw-r--r--  tests/unittests/sources/test_hetzner.py  | 5
-rw-r--r--  tests/unittests/sources/test_init.py  | 37
-rw-r--r--  tests/unittests/sources/test_lxd.py  | 266
-rw-r--r--  tests/unittests/sources/test_nwcs.py  | 116
-rw-r--r--  tests/unittests/sources/test_openstack.py  | 140
-rw-r--r--  tests/unittests/sources/test_oracle.py  | 200
-rw-r--r--  tests/unittests/sources/test_scaleway.py  | 110
-rw-r--r--  tests/unittests/sources/test_smartos.py  | 4
-rw-r--r--  tests/unittests/sources/test_upcloud.py  | 6
-rw-r--r--  tests/unittests/sources/test_vultr.py  | 16
-rw-r--r--  tests/unittests/test_apport.py  | 39
-rw-r--r--  tests/unittests/test_builtin_handlers.py  | 5
-rw-r--r--  tests/unittests/test_cli.py  | 7
-rw-r--r--  tests/unittests/test_data.py  | 170
-rw-r--r--  tests/unittests/test_ds_identify.py  | 62
-rw-r--r--  tests/unittests/test_render_cloudcfg.py  | 2
-rw-r--r--  tests/unittests/test_temp_utils.py  | 1
-rw-r--r--  tests/unittests/test_upgrade.py  | 28
-rw-r--r--  tests/unittests/test_url_helper.py  | 40
-rw-r--r--  tests/unittests/test_util.py  | 170
-rw-r--r--  tests/unittests/util.py  | 5
-rw-r--r--  tools/.github-cla-signers  | 12
-rwxr-xr-x  tools/check_json_format.sh  | 11
-rwxr-xr-x  tools/ds-identify  | 19
-rwxr-xr-x  tools/mock-meta.py  | 4
-rwxr-xr-x  tools/read-version  | 86
-rwxr-xr-x  tools/render-cloudcfg  | 1
311 files changed, 11597 insertions, 3035 deletions
diff --git a/.github/workflows/check_format.yml b/.github/workflows/check_format.yml
index 1cc2fac2..4e7a7271 100644
--- a/.github/workflows/check_format.yml
+++ b/.github/workflows/check_format.yml
@@ -49,3 +49,16 @@ jobs:
env:
TOXENV: tip-${{ matrix.env }}
run: tox
+ schema-format:
+ strategy:
+ fail-fast: false
+ name: Check json format
+ runs-on: ubuntu-22.04
+ steps:
+ - name: "Checkout #1"
+ uses: actions/checkout@v3.0.0
+
+ - name: "Test format"
+ run: |
+ tools/check_json_format.sh cloudinit/config/schemas/schema-cloud-config-v1.json
+ tools/check_json_format.sh cloudinit/config/schemas/versions.schema.cloud-config.json
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 7769bc55..f5a56263 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -1,8 +1,16 @@
version: 2
+build:
+ os: "ubuntu-22.04"
+ tools:
+ python: "3.10"
+
formats: all
python:
install:
- path: .
- requirements: doc-requirements.txt
+
+sphinx:
+ configuration: doc/rtd/conf.py
diff --git a/.travis.yml b/.travis.yml
index 253295dd..6456204b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -133,7 +133,7 @@ matrix:
TOXENV=lowest-supported
PYTEST_ADDOPTS=-v # List all tests run by pytest
dist: bionic
- - python: 3.7
+ - python: 3.10
env: TOXENV=doc
install:
- git fetch --unshallow
@@ -146,6 +146,7 @@ matrix:
- make check_spelling && tox
# Test all supported Python versions (but at the end, so we schedule
# longer-running jobs first)
+ - python: 3.12-dev
- python: 3.11-dev
- python: "3.10"
- python: 3.9
diff --git a/ChangeLog b/ChangeLog
index 0ff76617..16d58d3a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,122 @@
+22.4
+ - test: fix pro integration test [Alberto Contreras]
+ - cc_disk_setup: pass options in correct order to utils (#1829)
+ [dermotbradley]
+ - tests: text_lxd basic_preseed verify_clean_log (#1826)
+ - docs: switch sphinx theme to furo (SC-1327) (#1821) [Alberto Contreras]
+ - tests: activate Ubuntu Pro tests (only on Jenkins) (#1777)
+ [Alberto Contreras]
+ - tests: test_lxd assert features.storage.buckets when present (#1827)
+ - tests: replace missed ansible install-method with underscore (#1825)
+ - tests: replace ansible install-method with underscore
+ - ansible: standardize schema keys
+ - ci: run json tool on 22.04 rather than 20.04 (#1823)
+ - Stop using devices endpoint for LXD network config (#1819)
+ - apport: address new curtin log and config locations (#1812)
+ - cc_grub: reword docs for clarity (#1818)
+ - tests: Fix preseed test (#1820)
+ - Auto-format schema (#1810)
+ - Ansible Control Module (#1778)
+ - Fix last reported event possibly not being sent (#1796) (LP: #1993836)
+ - tests: Ignore unsupported lxd project keys (#1817) [Alberto Contreras]
+ - udevadm settle should handle non-udev system gracefully (#1806)
+ [dermotbradley]
+ - add mariner support (#1780) [Minghe Ren]
+ - Net: add BSD ifconfig(8) parser and state class (#1779) [Mina Galić]
+ - adding itjamie to .github-cla-signers [Jamie (Bear) Murphy]
+ - Fix inconsistency between comment and statement (#1809) [Guillaume Gay]
+ - Update .github-cla-signers (#1811) [Guillaume Gay]
+ - alpine.py: Add Alpine-specific manage_service function and update tests
+ (#1804) [dermotbradley]
+ - test: add 3.12-dev to Travis CI (#1798) [Alberto Contreras]
+ - add NWCS datasource (#1793) [shell-skrimp]
+ - Adding myself as CLA signer (#1799) [s-makin]
+ - apport: fix some data collection failures due to symlinks (#1797)
+ [Dan Bungert]
+ - read-version: Make it compatible with bionic (#1795) [Alberto Contreras]
+ - lxd: add support for lxd preseed config(#1789)
+ - Enable hotplug for LXD datasource (#1787)
+ - cli: collect logs and apport subiquity support
+ - add support for Container-Optimized OS (#1748) [vteratipally]
+ - test: temporarily disable failing integration test (#1792)
+ - Fix LXD/nocloud detection on lxd vm tests (#1791)
+ - util: Implement __str__ and __iter__ for Version (#1790)
+ - cc_ua: consume ua json api for enable commands [Alberto Contreras]
+ - Add clarity to cc_final_message docs (#1788)
+ - cc_ntp: add support for BSDs (#1759) [Mina Galić] (LP: #1990041)
+ - make Makefile make agnostic (#1786) [Mina Galić]
+ - Remove hardcoding and unnecessary overrides in Makefile (#1783)
+ [Joseph Mingrone]
+ - Add my username (Jehops) to .github-cla-signers (#1784) [Joseph Mingrone]
+ - Temporarily remove broken test (#1781)
+ - Create reference documentation for base config
+ - cc_ansible: add support for galaxy install (#1736)
+ - distros/manage_services: add support to disable service (#1772)
+ [Mina Galić] (LP: #1991024)
+ - OpenBSD: remove pkg_cmd_environ function (#1773)
+ [Mina Galić] (LP: 1991567)
+ - docs: Correct typo in the FAQ (#1774) [Maximilian Wörner]
+ - tests: Use LXD metadata to determine NoCloud status (#1776)
+ - analyze: use init-local as start of boot record (#1767) [Chris Patterson]
+ - docs: use opensuse for distro name in package doc (#1771)
+ - doc: clarify packages as dev only (#1769) [Alberto Contreras]
+ - Distro manage service: Improve BSD support (#1758)
+ [Mina Galić] (LP: #1990070)
+ - testing: check logs for critical errors (#1765) [Chris Patterson]
+ - cc_ubuntu_advantage: Handle already attached on Pro [Alberto Contreras]
+ - doc: Add configuration explanation (SC-1169)
+ - Fix Oracle DS primary interface when using IMDS (#1757) (LP: #1989686)
+ - style: prefer absolute imports over relative imports [Mina Galić]
+ - tests: Fix ip log during instance destruction (#1755) [Alberto Contreras]
+ - cc_ubuntu_advantage: add ua_config in auto-attach [Alberto Contreras]
+ - apt configure: sources write/append mode (#1738)
+ [Fabian Lichtenegger-Lukas]
+ - networkd: Add test and improve typing. (#1747) [Alberto Contreras]
+ - pycloudlib: bump commit for gce cpu architecture support (#1750)
+ - commit ffcb29bc8315d1e1d6244eeb1cbd8095958f7bad (LP: #1307667)
+ - testing: workaround LXD vendor data (#1740)
+ - support dhcp{4,6}-overrides in networkd renderer (#1710) [Aidan Obley]
+ - tests: Drop httpretty in favor of responses (#1720) [Alberto Contreras]
+ - cc_ubuntu_advantage: Implement custom auto-attach behaviors (#1583)
+ [Alberto Contreras]
+ - Fix Oracle DS not setting subnet when using IMDS (#1735) (LP: #1989686)
+ - testing: focal lxd datasource discovery (#1734)
+ - cc_ubuntu_advantage: Redact token from logs (#1726) [Alberto Contreras]
+ - docs: make sure echo properly evaluates the string (#1733) [Mina Galić]
+ - net: set dhclient lease and pid files (#1715)
+ - cli: status machine-readable output --format yaml/json (#1663)
+ (LP: #1883122)
+ - tests: Simplify does_not_raise (#1731) [Alberto Contreras]
+ - Refactor: Drop inheritance from object (#1728) [Alberto Contreras]
+ - testing: LXD datasource now supported on Focal (#1732)
+ - Allow jinja templating in /etc/cloud (SC-1170) (#1722) (LP: #1913461)
+ - sources/azure: ensure instance id is always correct (#1727)
+ [Chris Patterson]
+ - azure: define new attribute for pre-22.3 pickles (#1725)
+ - doc: main page Diátaxis rewording (SC-967) (#1701)
+ - ubuntu advantage: improved idempotency, enable list is now strict
+ [Fabian Lichtenegger-Lukas]
+ - test: bump pycloudlib (#1724) [Alberto Contreras]
+ - cloud.cfg.tmpl: make sure "centos" settings are identical to "rhel"
+ (#1639) [Emanuele Giuseppe Esposito]
+ - lxd: fetch 1.0/devices content (#1712) [Alberto Contreras]
+ - Update docs according to ad8f406a (#1719)
+ - testing: Port unittests/analyze to pytest (#1708) [Alberto Contreras]
+ - doc: Fix rtd builds. (#1718) [Alberto Contreras]
+ - testing: fully mock noexec calls (#1717) [Alberto Contreras]
+ - typing: Add types to cc_<module>.handle (#1700) [Alberto Contreras]
+ - Identify 3DS Outscale Datasource as Ec2 (#1686) [Maxime Dufour]
+ - config: enable bootstrapping pip in ansible (#1707)
+ - Fix cc_chef typing issue (#1716)
+ - Refactor instance json files to use Paths (SC-1238) (#1709)
+ - tools: read-version check GITHUB_REF and git branch --show-current
+ (#1677)
+ - net: Ensure a tmp with exec permissions for dhcp (#1690)
+ [Alberto Contreras] (LP: #1962343)
+ - testing: Fix test regression in test_combined (#1713) [Alberto Contreras]
+ - Identify Huawei Cloud as OpenStack (#1689) [huang xinjie]
+ - doc: add reporting suggestion to FAQ (SC-1236) (#1698)
+
22.3
- sources: obj.pkl cache should be written anyime get_data is run (#1669)
- schema: drop release number from version file (#1664)
diff --git a/Makefile b/Makefile
index 2acf132e..a0221c27 100644
--- a/Makefile
+++ b/Makefile
@@ -4,13 +4,11 @@ VARIANT ?= ubuntu
YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f )
YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
-PYTHON = python3
+PYTHON ?= python3
NUM_ITER ?= 100
-ifeq ($(distro),)
- distro = redhat
-endif
+distro ?= redhat
READ_VERSION=$(shell $(PYTHON) $(CWD)/tools/read-version || echo read-version-failed)
CODE_VERSION=$(shell $(PYTHON) -c "from cloudinit import version; print(version.version_string())")
@@ -29,7 +27,7 @@ flake8:
@$(CWD)/tools/run-flake8
unittest: clean_pyc
- python3 -m pytest -v tests/unittests cloudinit
+ $(PYTHON) -m pytest -v tests/unittests cloudinit
render-template:
$(PYTHON) ./tools/render-cloudcfg --variant=$(VARIANT) $(FILE) $(subst .tmpl,,$(FILE))
diff --git a/README.md b/README.md
index 64a1635d..feb896ac 100644
--- a/README.md
+++ b/README.md
@@ -37,9 +37,9 @@ Below are a list of the many OSes and clouds that contain and ship with cloud-in
distribution or cloud is not listed or does not have a recent version of cloud-init, please
get in contact with that distribution and send them our way!
-| Supported OSes | Supported Public Clouds | Supported Private Clouds |
-| --- | --- | --- |
-| Alpine Linux<br />Arch Linux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />OpenMandriva<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Supported OSes | Supported Public Clouds | Supported Private Clouds |
+|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| --- | --- |
+| Alpine Linux<br />Arch Linux<br />Container-Optimized OS<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />OpenMandriva<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX/MarinerOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />Huawei Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index df08d46c..c5293486 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -8,10 +8,9 @@ import sys
from datetime import datetime
from typing import IO
+from cloudinit.analyze import dump, show
from cloudinit.util import json_dumps
-from . import dump, show
-
def get_parser(parser=None):
if not parser:
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 04621f12..8ce649de 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -131,7 +131,7 @@ def total_time_record(total_time):
return "Total Time: %3.5f seconds\n" % total_time
-class SystemctlReader(object):
+class SystemctlReader:
"""
Class for dealing with all systemctl subp calls in a consistent manner.
"""
@@ -309,7 +309,6 @@ def generate_records(
start_time = None
total_time = 0.0
stage_start_time = {}
- stages_seen = []
boot_records = []
unprocessed = []
@@ -321,7 +320,7 @@ def generate_records(
next_evt = None
if event_type(event) == "start":
- if event.get("name") in stages_seen:
+ if records and event.get("name") == "init-local":
records.append(total_time_record(total_time))
boot_records.append(records)
records = []
@@ -329,7 +328,6 @@ def generate_records(
total_time = 0.0
if start_time is None:
- stages_seen = []
start_time = event_datetime(event)
stage_start_time[event_parent(event)] = start_time
@@ -346,7 +344,6 @@ def generate_records(
# This is a parent event
records.append("Starting stage: %s" % event.get("name"))
unprocessed.append(event)
- stages_seen.append(event.get("name"))
continue
else:
prev_evt = unprocessed.pop()
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index aa3a6c5c..e42ecf8e 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -3,11 +3,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Cloud-init apport interface"""
+
+import os
+
from cloudinit.cmd.devel import read_cfg_paths
+from cloudinit.cmd.devel.logs import (
+ INSTALLER_APPORT_FILES,
+ INSTALLER_APPORT_SENSITIVE_FILES,
+)
try:
from apport.hookutils import (
attach_file,
+ attach_file_if_exists,
attach_root_command_outputs,
root_command_output,
)
@@ -29,8 +37,10 @@ KNOWN_CLOUD_NAMES = [
"DigitalOcean",
"E24Cloud",
"GCE - Google Compute Engine",
+ "Huawei Cloud",
"Exoscale",
"Hetzner Cloud",
+ "NWCS",
"IBM - (aka SoftLayer or BlueMix)",
"LXD",
"MAAS",
@@ -48,6 +58,7 @@ KNOWN_CLOUD_NAMES = [
"VMware",
"Vultr",
"ZStack",
+ "Outscale",
"Other",
]
@@ -108,30 +119,58 @@ def attach_cloud_info(report, ui=None):
report["CloudName"] = "None"
+def attach_installer_files(report, ui=None):
+ """Attach any subiquity installer logs config.
+
+ To support decoupling apport integration from installer config/logs,
+ we eventually want to either source this function or APPORT_FILES
+ attribute from subiquity and/or ubuntu-desktop-installer package-hooks
+ python modules.
+ """
+ for apport_file in INSTALLER_APPORT_FILES:
+ realpath = os.path.realpath(apport_file.path)
+ attach_file_if_exists(report, realpath, apport_file.label)
+
+
def attach_user_data(report, ui=None):
"""Optionally provide user-data if desired."""
if ui:
user_data_file = _get_user_data_file()
prompt = (
- "Your user-data or cloud-config file can optionally be provided"
- " from {0} and could be useful to developers when addressing this"
- " bug. Do you wish to attach user-data to this bug?".format(
- user_data_file
- )
+ "Your user-data, cloud-config or autoinstall files can optionally "
+ " be provided from {0} and could be useful to developers when"
+ " addressing this bug. Do you wish to attach user-data to this"
+ " bug?".format(user_data_file)
)
response = ui.yesno(prompt)
if response is None:
raise StopIteration # User cancelled
if response:
- attach_file(report, user_data_file, "user_data.txt")
+ realpath = os.path.realpath(user_data_file)
+ attach_file(report, realpath, "user_data.txt")
+ for apport_file in INSTALLER_APPORT_SENSITIVE_FILES:
+ realpath = os.path.realpath(apport_file.path)
+ attach_file_if_exists(report, realpath, apport_file.label)
def add_bug_tags(report):
"""Add any appropriate tags to the bug."""
+ new_tags = []
+ if report.get("CurtinError"):
+ new_tags.append("curtin")
+ if report.get("SubiquityLog"):
+ new_tags.append("subiquity")
if "JournalErrors" in report.keys():
errors = report["JournalErrors"]
if "Breaking ordering cycle" in errors:
- report["Tags"] = "systemd-ordering"
+ new_tags.append("systemd-ordering")
+ if report.get("UdiLog"):
+ new_tags.append("ubuntu-desktop-installer")
+ if new_tags:
+ report.setdefault("Tags", "")
+ if report["Tags"]:
+ report["Tags"] += " "
+ report["Tags"] += " ".join(new_tags)
def add_info(report, ui):
@@ -149,6 +188,7 @@ def add_info(report, ui):
attach_hwinfo(report, ui)
attach_cloud_info(report, ui)
attach_user_data(report, ui)
+ attach_installer_files(report, ui)
add_bug_tags(report)
return True
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index cbc5d0db..3600a784 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
# while the stages/other objects can be worked on independently...
-class Cloud(object):
+class Cloud:
def __init__(
self,
datasource: DataSource,
diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py
index 567d341a..10527139 100755..100644
--- a/cloudinit/cmd/cloud_id.py
+++ b/cloudinit/cmd/cloud_id.py
@@ -8,16 +8,11 @@ import argparse
import json
import sys
+from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.cmd.status import UXAppStatus, get_status_details
-from cloudinit.sources import (
- INSTANCE_JSON_FILE,
- METADATA_UNKNOWN,
- canonical_cloud_id,
-)
+from cloudinit.sources import METADATA_UNKNOWN, canonical_cloud_id
from cloudinit.util import error
-DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE
-
NAME = "cloud-id"
@@ -30,6 +25,7 @@ def get_parser(parser=None):
@returns: ArgumentParser with proper argument configuration.
"""
+ default_instance_json = read_cfg_paths().get_runpath("instance_data")
if not parser:
parser = argparse.ArgumentParser(
prog=NAME,
@@ -53,9 +49,11 @@ def get_parser(parser=None):
"-i",
"--instance-data",
type=str,
- default=DEFAULT_INSTANCE_JSON,
- help="Path to instance-data.json file. Default is %s"
- % DEFAULT_INSTANCE_JSON,
+ default=default_instance_json,
+ help=(
+ "Path to instance-data.json file. "
+ f"Default is {default_instance_json}"
+ ),
)
return parser
@@ -67,12 +65,12 @@ def handle_args(name, args):
@return: 0 on success, 1 on error, 2 on disabled, 3 on cloud-init not run.
"""
- status, _status_details, _time = get_status_details()
- if status == UXAppStatus.DISABLED:
- sys.stdout.write("{0}\n".format(status.value))
+ status_details = get_status_details()
+ if status_details.status == UXAppStatus.DISABLED:
+ sys.stdout.write("{0}\n".format(status_details.status.value))
return 2
- elif status == UXAppStatus.NOT_RUN:
- sys.stdout.write("{0}\n".format(status.value))
+ elif status_details.status == UXAppStatus.NOT_RUN:
+ sys.stdout.write("{0}\n".format(status_details.status.value))
return 3
try:
@@ -94,6 +92,7 @@ def handle_args(name, args):
v1.get("platform", METADATA_UNKNOWN),
)
if args.json:
+ sys.stderr.write("DEPRECATED: Use: cloud-init query v1\n")
v1["cloud_id"] = cloud_id
response = json.dumps( # Pretty, sorted json
v1, indent=1, sort_keys=True, separators=(",", ": ")
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index f95e8cc0..560857ef 100755
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -182,7 +182,7 @@ def is_enabled(hotplug_init, subsystem):
)
-def initialize_datasource(hotplug_init, subsystem):
+def initialize_datasource(hotplug_init: Init, subsystem: str):
LOG.debug("Fetching datasource")
datasource = hotplug_init.fetch(existing="trust")
@@ -220,8 +220,9 @@ def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction):
try:
LOG.debug("Refreshing metadata")
event_handler.update_metadata()
- LOG.debug("Detecting device in updated metadata")
- event_handler.detect_hotplugged_device()
+ if not datasource.skip_hotplug_detect:
+ LOG.debug("Detecting device in updated metadata")
+ event_handler.detect_hotplugged_device()
LOG.debug("Applying config change")
event_handler.apply()
LOG.debug("Updating cache")
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index a87b7043..9c4b6dcd 100755
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -11,9 +11,11 @@ import os
import shutil
import sys
from datetime import datetime
+from pathlib import Path
+from typing import NamedTuple
from cloudinit.cmd.devel import read_cfg_paths
-from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.helpers import Paths
from cloudinit.subp import ProcessExecutionError, subp
from cloudinit.temp_utils import tempdir
from cloudinit.util import chdir, copy, ensure_dir, write_file
@@ -22,11 +24,66 @@ CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"]
CLOUDINIT_RUN_DIR = "/run/cloud-init"
+class ApportFile(NamedTuple):
+ path: str
+ label: str
+
+
+INSTALLER_APPORT_SENSITIVE_FILES = [
+ ApportFile(
+ "/var/log/installer/autoinstall-user-data", "AutoInstallUserData"
+ ),
+ ApportFile("/autoinstall.yaml", "AutoInstallYAML"),
+ ApportFile("/etc/cloud/cloud.cfg.d/99-installer.cfg", "InstallerCloudCfg"),
+]
+
+INSTALLER_APPORT_FILES = [
+ ApportFile("/var/log/installer/ubuntu_desktop_installer.log", "UdiLog"),
+ ApportFile(
+ "/var/log/installer/subiquity-server-debug.log", "SubiquityServerDebug"
+ ),
+ ApportFile(
+ "/var/log/installer/subiquity-client-debug.log", "SubiquityClientDebug"
+ ),
+ ApportFile("/var/log/installer/curtin-install.log", "CurtinLog"),
+ # Legacy single curtin config < 22.1
+ ApportFile(
+ "/var/log/installer/subiquity-curtin-install.conf",
+ "CurtinInstallConfig",
+ ),
+ ApportFile(
+ "/var/log/installer/curtin-install/subiquity-initial.conf",
+ "CurtinConfigInitial",
+ ),
+ ApportFile(
+ "/var/log/installer/curtin-install/subiquity-curthooks.conf",
+ "CurtinConfigCurtHooks",
+ ),
+ ApportFile(
+ "/var/log/installer/curtin-install/subiquity-extract.conf",
+ "CurtinConfigExtract",
+ ),
+ ApportFile(
+ "/var/log/installer/curtin-install/subiquity-partitioning.conf",
+ "CurtinConfigPartitioning",
+ ),
+ # Legacy curtin < 22.1 curtin error tar path
+ ApportFile("/var/log/installer/curtin-error-logs.tar", "CurtinError"),
+ ApportFile("/var/log/installer/curtin-errors.tar", "CurtinError"),
+ ApportFile("/var/log/installer/block/probe-data.json", "ProbeData"),
+]
+
+
def _get_user_data_file() -> str:
paths = read_cfg_paths()
return paths.get_ipath_cur("userdata_raw")
+def _get_cloud_data_path() -> str:
+ paths = read_cfg_paths()
+ return paths.get_cpath("data")
+
+
def get_parser(parser=None):
"""Build or extend and arg parser for collect-logs utility.
@@ -80,7 +137,7 @@ def _copytree_rundir_ignore_files(curdir, files):
]
if os.getuid() != 0:
# Ignore root-permissioned files
- ignored_files.append(INSTANCE_JSON_SENSITIVE_FILE)
+ ignored_files.append(Paths({}).lookups["instance_data_sensitive"])
return ignored_files
@@ -110,6 +167,21 @@ def _collect_file(path, out_dir, verbosity):
_debug("file %s did not exist\n" % path, 2, verbosity)
+def collect_installer_logs(log_dir, include_userdata, verbosity):
+ """Obtain subiquity logs and config files."""
+ for src_file in INSTALLER_APPORT_FILES:
+ destination_dir = Path(log_dir + src_file.path).parent
+ if not destination_dir.exists():
+ ensure_dir(str(destination_dir))
+ _collect_file(src_file.path, str(destination_dir), verbosity)
+ if include_userdata:
+ for src_file in INSTALLER_APPORT_SENSITIVE_FILES:
+ destination_dir = Path(log_dir + src_file.path).parent
+ if not destination_dir.exists():
+ ensure_dir(str(destination_dir))
+ _collect_file(src_file.path, str(destination_dir), verbosity)
+
+
def collect_logs(tarfile, include_userdata: bool, verbosity=0):
"""Collect all cloud-init logs and tar them up into the provided tarfile.
@@ -123,8 +195,7 @@ def collect_logs(tarfile, include_userdata: bool, verbosity=0):
)
return 1
tarfile = os.path.abspath(tarfile)
- date = datetime.utcnow().date().strftime("%Y-%m-%d")
- log_dir = "cloud-init-logs-{0}".format(date)
+ log_dir = datetime.utcnow().date().strftime("cloud-init-logs-%Y-%m-%d")
with tempdir(dir="/tmp") as tmp_dir:
log_dir = os.path.join(tmp_dir, log_dir)
version = _write_command_output_to_file(
@@ -160,6 +231,8 @@ def collect_logs(tarfile, include_userdata: bool, verbosity=0):
if include_userdata:
user_data_file = _get_user_data_file()
_collect_file(user_data_file, log_dir, verbosity)
+ collect_installer_logs(log_dir, include_userdata, verbosity)
+
run_dir = os.path.join(log_dir, "run")
ensure_dir(run_dir)
if os.path.exists(CLOUDINIT_RUN_DIR):
@@ -179,6 +252,14 @@ def collect_logs(tarfile, include_userdata: bool, verbosity=0):
1,
verbosity,
)
+ if os.path.exists(os.path.join(CLOUDINIT_RUN_DIR, "disabled")):
+ # Fallback to grab previous cloud/data
+ cloud_data_dir = Path(_get_cloud_data_path())
+ if cloud_data_dir.exists():
+ shutil.copytree(
+ str(cloud_data_dir),
+ Path(log_dir + str(cloud_data_dir)),
+ )
with chdir(tmp_dir):
subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")])
sys.stderr.write("Wrote %s\n" % tarfile)
diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py
index 421e88fd..f18bc459 100755
--- a/cloudinit/cmd/devel/make_mime.py
+++ b/cloudinit/cmd/devel/make_mime.py
@@ -10,10 +10,9 @@ from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from cloudinit import log
+from cloudinit.cmd.devel import addLogHandlerCLI
from cloudinit.handlers import INCLUSION_TYPES_MAP
-from . import addLogHandlerCLI
-
NAME = "make-mime"
LOG = log.getLogger(NAME)
EPILOG = (
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 460b94b3..7ddb8fc7 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -6,7 +6,7 @@
import argparse
-from . import hotplug_hook, make_mime, net_convert, render
+from cloudinit.cmd.devel import hotplug_hook, make_mime, net_convert, render
def get_parser(parser=None):
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 62b432d2..69fdb5f0 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -9,10 +9,12 @@ import os
import sys
from cloudinit import log
-from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
-from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
-
-from . import addLogHandlerCLI, read_cfg_paths
+from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
+from cloudinit.handlers.jinja_template import (
+ JinjaLoadError,
+ NotJinjaError,
+ render_jinja_payload_from_file,
+)
NAME = "render"
@@ -51,7 +53,7 @@ def get_parser(parser=None):
return parser
-def handle_args(name, args):
+def render_template(user_data_path, instance_data_path=None, debug=False):
"""Render the provided user-data template file using instance-data values.
Also setup CLI log handlers to report to stderr since this is a development
@@ -59,17 +61,15 @@ def handle_args(name, args):
@return 0 on success, 1 on failure.
"""
- addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
- if args.instance_data:
- instance_data_fn = args.instance_data
+ addLogHandlerCLI(LOG, log.DEBUG if debug else log.WARNING)
+ if instance_data_path:
+ instance_data_fn = instance_data_path
else:
paths = read_cfg_paths()
uid = os.getuid()
- redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
+ redacted_data_fn = paths.get_runpath("instance_data")
if uid == 0:
- instance_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
- )
+ instance_data_fn = paths.get_runpath("instance_data_sensitive")
if not os.path.exists(instance_data_fn):
LOG.warning(
"Missing root-readable %s. Using redacted %s instead.",
@@ -83,32 +83,33 @@ def handle_args(name, args):
LOG.error("Missing instance-data.json file: %s", instance_data_fn)
return 1
try:
- with open(args.user_data) as stream:
+ with open(user_data_path) as stream:
user_data = stream.read()
except IOError:
- LOG.error("Missing user-data file: %s", args.user_data)
+ LOG.error("Missing user-data file: %s", user_data_path)
return 1
try:
rendered_payload = render_jinja_payload_from_file(
payload=user_data,
- payload_fn=args.user_data,
+ payload_fn=user_data_path,
instance_data_file=instance_data_fn,
- debug=True if args.debug else False,
+ debug=True if debug else False,
+ )
+ except (JinjaLoadError, NotJinjaError) as e:
+ LOG.error(
+ "Cannot render from instance data due to exception: %s", repr(e)
)
- except RuntimeError as e:
- LOG.error("Cannot render from instance data: %s", str(e))
return 1
if not rendered_payload:
- LOG.error("Unable to render user-data file: %s", args.user_data)
+ LOG.error("Unable to render user-data file: %s", user_data_path)
return 1
sys.stdout.write(rendered_payload)
return 0
-def main():
- args = get_parser().parse_args()
- return handle_args(NAME, args)
+def handle_args(_name, args):
+ return render_template(args.user_data, args.instance_data, args.debug)
if __name__ == "__main__":
- sys.exit(main())
+ sys.exit(handle_args(NAME, get_parser().parse_args()))
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 6134d7c4..f28fda15 100755
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -802,12 +802,10 @@ def status_wrapper(name, args, data_d=None, link_d=None):
return len(v1[mode]["errors"])
-def _maybe_persist_instance_data(init):
+def _maybe_persist_instance_data(init: stages.Init):
"""Write instance-data.json file if absent and datasource is restored."""
- if init.ds_restored:
- instance_data_file = os.path.join(
- init.paths.run_dir, sources.INSTANCE_JSON_FILE
- )
+ if init.datasource and init.ds_restored:
+ instance_data_file = init.paths.get_runpath("instance_data")
if not os.path.exists(instance_data_file):
init.datasource.persist_instance_data(write_cache=False)
@@ -919,13 +917,13 @@ def main(sysv_args=None):
"--name",
"-n",
action="store",
- help="module name to run",
+ help="Module name to run.",
required=True,
)
parser_single.add_argument(
"--frequency",
action="store",
- help="Set module frequency.",
+ help="Module frequency for this run.",
required=False,
choices=list(FREQ_SHORT_NAMES.keys()),
)
@@ -1092,8 +1090,8 @@ def main(sysv_args=None):
func=functor,
args=(name, args),
)
- reporting.flush_events()
- return retval
+ reporting.flush_events()
+ return retval
if __name__ == "__main__":
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index 2dcd8e44..9add6a71 100755
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -27,11 +27,7 @@ from cloudinit.handlers.jinja_template import (
get_jinja_variable_alias,
render_jinja_payload,
)
-from cloudinit.sources import (
- INSTANCE_JSON_FILE,
- INSTANCE_JSON_SENSITIVE_FILE,
- REDACT_SENSITIVE_VALUE,
-)
+from cloudinit.sources import REDACT_SENSITIVE_VALUE
NAME = "query"
LOG = log.getLogger(NAME)
@@ -59,8 +55,10 @@ def get_parser(parser=None):
"-i",
"--instance-data",
type=str,
- help="Path to instance-data.json file. Default is /run/cloud-init/%s"
- % INSTANCE_JSON_FILE,
+ help=(
+ "Path to instance-data.json file. Default is "
+ f"{read_cfg_paths().get_runpath('instance_data')}"
+ ),
)
parser.add_argument(
"-l",
@@ -151,16 +149,13 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
access perms.
"""
uid = os.getuid()
- if not all([instance_data, user_data, vendor_data]):
- paths = read_cfg_paths()
+ paths = read_cfg_paths()
if instance_data:
instance_data_fn = instance_data
else:
- redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
+ redacted_data_fn = paths.get_runpath("instance_data")
if uid == 0:
- sensitive_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
- )
+ sensitive_data_fn = paths.get_runpath("instance_data_sensitive")
if os.path.exists(sensitive_data_fn):
instance_data_fn = sensitive_data_fn
else:
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
index 1c7c209b..df136288 100755..100644
--- a/cloudinit/cmd/status.py
+++ b/cloudinit/cmd/status.py
@@ -7,12 +7,15 @@
"""Define 'status' utility and handler as part of cloud-init commandline."""
import argparse
+import copy
import enum
+import json
import os
import sys
from time import gmtime, sleep, strftime
-from typing import Tuple
+from typing import Any, Dict, List, NamedTuple, Tuple, Union
+from cloudinit import safeyaml
from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.distros import uses_systemd
from cloudinit.util import get_cmdline, load_file, load_json
@@ -32,6 +35,43 @@ class UXAppStatus(enum.Enum):
DISABLED = "disabled"
+@enum.unique
+class UXAppBootStatusCode(enum.Enum):
+ """Enum representing user-visible cloud-init boot status codes."""
+
+ DISABLED_BY_GENERATOR = "disabled-by-generator"
+ DISABLED_BY_KERNEL_CMDLINE = "disabled-by-kernel-cmdline"
+ DISABLED_BY_MARKER_FILE = "disabled-by-marker-file"
+ ENABLED_BY_GENERATOR = "enabled-by-generator"
+ ENABLED_BY_KERNEL_CMDLINE = "enabled-by-kernel-cmdline"
+ ENABLED_BY_SYSVINIT = "enabled-by-sysvinit"
+ UNKNOWN = "unknown"
+
+
+DISABLED_BOOT_CODES = frozenset(
+ [
+ UXAppBootStatusCode.DISABLED_BY_GENERATOR,
+ UXAppBootStatusCode.DISABLED_BY_KERNEL_CMDLINE,
+ UXAppBootStatusCode.DISABLED_BY_MARKER_FILE,
+ ]
+)
+
+
+class StatusDetails(NamedTuple):
+ status: UXAppStatus
+ boot_status_code: UXAppBootStatusCode
+ description: str
+ errors: List[str]
+ last_update: str
+ datasource: str
+
+
+TABULAR_LONG_TMPL = """\
+boot_status_code: {boot_code}
+{last_update}detail:
+{description}"""
+
+
def get_parser(parser=None):
"""Build or extend an arg parser for status utility.
@@ -46,6 +86,13 @@ def get_parser(parser=None):
prog="status", description="Report run status of cloud init"
)
parser.add_argument(
+ "--format",
+ type=str,
+ choices=["json", "tabular", "yaml"],
+ default="tabular",
+ help="Specify output format for cloud-id (default: tabular)",
+ )
+ parser.add_argument(
"-l",
"--long",
action="store_true",
@@ -69,54 +116,87 @@ def handle_status_args(name, args) -> int:
"""Handle calls to 'cloud-init status' as a subcommand."""
# Read configured paths
paths = read_cfg_paths()
- status, status_detail, time = get_status_details(paths)
+ details = get_status_details(paths)
if args.wait:
- while status in (UXAppStatus.NOT_RUN, UXAppStatus.RUNNING):
- sys.stdout.write(".")
- sys.stdout.flush()
- status, status_detail, time = get_status_details(paths)
+ while details.status in (UXAppStatus.NOT_RUN, UXAppStatus.RUNNING):
+ if args.format == "tabular":
+ sys.stdout.write(".")
+ sys.stdout.flush()
+ details = get_status_details(paths)
sleep(0.25)
- sys.stdout.write("\n")
- print("status: {0}".format(status.value))
- if args.long:
- if time:
- print("time: {0}".format(time))
- print("detail:\n{0}".format(status_detail))
- return 1 if status == UXAppStatus.ERROR else 0
+ details_dict: Dict[str, Union[str, List[str], Dict[str, Any]]] = {
+ "datasource": details.datasource,
+ "boot_status_code": details.boot_status_code.value,
+ "status": details.status.value,
+ "detail": details.description,
+ "errors": details.errors,
+ "last_update": details.last_update,
+ }
+ details_dict["schemas"] = {"1": copy.deepcopy(details_dict)}
+ details_dict["_schema_version"] = "1"
+ if args.format == "tabular":
+ prefix = "\n" if args.wait else ""
+ print(f"{prefix}status: {details.status.value}")
+ if args.long:
+ if details.last_update:
+ last_update = f"last_update: {details.last_update}\n"
+ else:
+ last_update = ""
+ print(
+ TABULAR_LONG_TMPL.format(
+ prefix=prefix,
+ boot_code=details.boot_status_code.value,
+ description=details.description,
+ last_update=last_update,
+ )
+ )
+ elif args.format == "json":
+ print(
+ json.dumps( # Pretty, sorted json
+ details_dict, indent=2, sort_keys=True, separators=(",", ": ")
+ )
+ )
+ elif args.format == "yaml":
+ print(safeyaml.dumps(details_dict))
+ return 1 if details.status == UXAppStatus.ERROR else 0
-def _is_cloudinit_disabled(disable_file, paths):
- """Report whether cloud-init is disabled.
+
+def get_bootstatus(disable_file, paths) -> Tuple[UXAppBootStatusCode, str]:
+ """Report whether cloud-init current boot status
@param disable_file: The path to the cloud-init disable file.
@param paths: An initialized cloudinit.helpers.Paths object.
- @returns: A tuple containing (bool, reason) about cloud-init's status and
+ @returns: A tuple containing (code, reason) about cloud-init's status and
why.
"""
- is_disabled = False
cmdline_parts = get_cmdline().split()
if not uses_systemd():
+ bootstatus_code = UXAppBootStatusCode.ENABLED_BY_SYSVINIT
reason = "Cloud-init enabled on sysvinit"
elif "cloud-init=enabled" in cmdline_parts:
+ bootstatus_code = UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE
reason = "Cloud-init enabled by kernel command line cloud-init=enabled"
elif os.path.exists(disable_file):
- is_disabled = True
+ bootstatus_code = UXAppBootStatusCode.DISABLED_BY_MARKER_FILE
reason = "Cloud-init disabled by {0}".format(disable_file)
elif "cloud-init=disabled" in cmdline_parts:
- is_disabled = True
+ bootstatus_code = UXAppBootStatusCode.DISABLED_BY_KERNEL_CMDLINE
reason = "Cloud-init disabled by kernel parameter cloud-init=disabled"
elif os.path.exists(os.path.join(paths.run_dir, "disabled")):
- is_disabled = True
+ bootstatus_code = UXAppBootStatusCode.DISABLED_BY_GENERATOR
reason = "Cloud-init disabled by cloud-init-generator"
elif os.path.exists(os.path.join(paths.run_dir, "enabled")):
+ bootstatus_code = UXAppBootStatusCode.ENABLED_BY_GENERATOR
reason = "Cloud-init enabled by systemd cloud-init-generator"
else:
+ bootstatus_code = UXAppBootStatusCode.UNKNOWN
reason = "Systemd generator may not have run yet."
- return (is_disabled, reason)
+ return (bootstatus_code, reason)
-def get_status_details(paths=None) -> Tuple[UXAppStatus, str, str]:
- """Return a 3-tuple of status, status_details and time of last event.
+def get_status_details(paths=None) -> StatusDetails:
+ """Return a dict with status, details and errors.
@param paths: An initialized cloudinit.helpers.paths object.
@@ -125,31 +205,32 @@ def get_status_details(paths=None) -> Tuple[UXAppStatus, str, str]:
paths = paths or read_cfg_paths()
status = UXAppStatus.NOT_RUN
- status_detail = ""
+ errors = []
+ datasource = ""
status_v1 = {}
status_file = os.path.join(paths.run_dir, "status.json")
result_file = os.path.join(paths.run_dir, "result.json")
- (is_disabled, reason) = _is_cloudinit_disabled(
+ boot_status_code, description = get_bootstatus(
CLOUDINIT_DISABLED_FILE, paths
)
- if is_disabled:
+ if boot_status_code in DISABLED_BOOT_CODES:
status = UXAppStatus.DISABLED
- status_detail = reason
if os.path.exists(status_file):
if not os.path.exists(result_file):
status = UXAppStatus.RUNNING
status_v1 = load_json(load_file(status_file)).get("v1", {})
- errors = []
latest_event = 0
for key, value in sorted(status_v1.items()):
if key == "stage":
if value:
status = UXAppStatus.RUNNING
- status_detail = "Running in stage: {0}".format(value)
+ description = "Running in stage: {0}".format(value)
elif key == "datasource":
- status_detail = value
+ description = value
+ datasource, _, _ = value.partition(" ")
+ datasource = datasource.lower().replace("datasource", "")
elif isinstance(value, dict):
errors.extend(value.get("errors", []))
start = value.get("start") or 0
@@ -161,14 +242,17 @@ def get_status_details(paths=None) -> Tuple[UXAppStatus, str, str]:
latest_event = event_time
if errors:
status = UXAppStatus.ERROR
- status_detail = "\n".join(errors)
+ description = "\n".join(errors)
elif status == UXAppStatus.NOT_RUN and latest_event > 0:
status = UXAppStatus.DONE
- if latest_event:
- time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event))
- else:
- time = ""
- return status, status_detail, time
+ last_update = (
+ strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event))
+ if latest_event
+ else ""
+ )
+ return StatusDetails(
+ status, boot_status_code, description, errors, last_update, datasource
+ )
def main():
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index e69de29b..e5670257 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -0,0 +1 @@
+Config = dict
diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py
index 92309272..d8fee517 100644
--- a/cloudinit/config/cc_ansible.py
+++ b/cloudinit/config/cc_ansible.py
@@ -1,16 +1,17 @@
"""ansible enables running on first boot either ansible-pull"""
import abc
-import logging
import os
import re
import sys
from copy import deepcopy
+from logging import Logger, getLogger
from textwrap import dedent
from typing import Optional
from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
-from cloudinit.distros import ALL_DISTROS
+from cloudinit.distros import ALL_DISTROS, Distro
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import subp, which
from cloudinit.util import Version, get_cfg_by_path
@@ -37,39 +38,45 @@ meta: MetaSchema = {
"examples": [
dedent(
"""\
- #cloud-config
ansible:
- install-method: distro
+ install_method: distro
pull:
url: "https://github.com/holmanb/vmboot.git"
- playbook-name: ubuntu.yml
+ playbook_name: ubuntu.yml
"""
),
dedent(
"""\
- #cloud-config
ansible:
- package-name: ansible-core
- install-method: pip
+ package_name: ansible-core
+ install_method: pip
pull:
url: "https://github.com/holmanb/vmboot.git"
- playbook-name: ubuntu.yml
+ playbook_name: ubuntu.yml
"""
),
],
}
__doc__ = get_meta_doc(meta)
-LOG = logging.getLogger(__name__)
+LOG = getLogger(__name__)
+CFG_OVERRIDE = "ansible_config"
class AnsiblePull(abc.ABC):
- cmd_version: list = []
- cmd_pull: list = []
- env: dict = os.environ.copy()
+ def __init__(self, distro: Distro):
+ self.cmd_pull = ["ansible-pull"]
+ self.cmd_version = ["ansible-pull", "--version"]
+ self.distro = distro
+ self.env = os.environ
+ self.run_user: Optional[str] = None
+
+ # some ansible modules directly reference os.environ["HOME"]
+ # and cloud-init might not have that set, default: /root
+ self.env["HOME"] = self.env.get("HOME", "/root")
def get_version(self) -> Optional[Version]:
- stdout, _ = subp(self.cmd_version, env=self.env)
+ stdout, _ = self.do_as(self.cmd_version)
first_line = stdout.splitlines().pop(0)
matches = re.search(r"([\d\.]+)", first_line)
if matches:
@@ -78,13 +85,21 @@ class AnsiblePull(abc.ABC):
return None
def pull(self, *args) -> str:
- stdout, _ = subp([*self.cmd_pull, *args], env=self.env)
+ stdout, _ = self.do_as([*self.cmd_pull, *args])
return stdout
def check_deps(self):
if not self.is_installed():
raise ValueError("command: ansible is not installed")
+ def do_as(self, command: list, **kwargs):
+ if not self.run_user:
+ return self.subp(command, **kwargs)
+ return self.distro.do_as(command, self.run_user, **kwargs)
+
+ def subp(self, command, **kwargs):
+ return subp(command, env=self.env, **kwargs)
+
@abc.abstractmethod
def is_installed(self):
pass
@@ -95,29 +110,43 @@ class AnsiblePull(abc.ABC):
class AnsiblePullPip(AnsiblePull):
- def __init__(self):
- self.cmd_pull = ["ansible-pull"]
- self.cmd_version = ["ansible-pull", "--version"]
- self.env["PATH"] = ":".join([self.env["PATH"], "/root/.local/bin/"])
+ def __init__(self, distro: Distro, user: Optional[str]):
+ super().__init__(distro)
+ self.run_user = user
+
+ # Add pip install site to PATH
+ user_base, _ = self.do_as(
+ [sys.executable, "-c", "import site; print(site.getuserbase())"]
+ )
+ ansible_path = f"{user_base.strip()}/bin/"
+ old_path = self.env.get("PATH")
+ if old_path:
+ self.env["PATH"] = ":".join([old_path, ansible_path])
+ else:
+ self.env["PATH"] = ansible_path
def install(self, pkg_name: str):
"""should cloud-init grow an interface for non-distro package
managers? this seems reusable
"""
if not self.is_installed():
- subp(["python3", "-m", "pip", "install", "--user", pkg_name])
+ # bootstrap pip if required
+ try:
+ import pip # type: ignore # noqa: F401
+ except ImportError:
+ self.distro.install_packages(self.distro.pip_package_name)
+ cmd = [sys.executable, "-m", "pip", "install"]
+ if self.run_user:
+ cmd.append("--user")
+ self.do_as([*cmd, "--upgrade", "pip"])
+ self.do_as([*cmd, pkg_name])
def is_installed(self) -> bool:
- stdout, _ = subp(["python3", "-m", "pip", "list"])
+ stdout, _ = self.do_as([sys.executable, "-m", "pip", "list"])
return "ansible" in stdout
class AnsiblePullDistro(AnsiblePull):
- def __init__(self, distro):
- self.cmd_pull = ["ansible-pull"]
- self.cmd_version = ["ansible-pull", "--version"]
- self.distro = distro
-
def install(self, pkg_name: str):
if not self.is_installed():
self.distro.install_packages(pkg_name)
@@ -126,46 +155,84 @@ class AnsiblePullDistro(AnsiblePull):
return bool(which("ansible"))
-def handle(name: str, cfg: dict, cloud: Cloud, _, __):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
+
ansible_cfg: dict = cfg.get("ansible", {})
+ ansible_user = ansible_cfg.get("run_user")
+ install_method = ansible_cfg.get("install_method")
+ setup_controller = ansible_cfg.get("setup_controller")
+
+ galaxy_cfg = ansible_cfg.get("galaxy")
+ pull_cfg = ansible_cfg.get("pull")
+ package_name = ansible_cfg.get("package_name", "")
+
if ansible_cfg:
+ ansible: AnsiblePull
validate_config(ansible_cfg)
- install = ansible_cfg["install-method"]
- pull_cfg = ansible_cfg.get("pull")
+
+ distro: Distro = cloud.distro
+ if install_method == "pip":
+ ansible = AnsiblePullPip(distro, ansible_user)
+ else:
+ ansible = AnsiblePullDistro(distro)
+ ansible.install(package_name)
+ ansible.check_deps()
+ ansible_config = ansible_cfg.get("ansible_config", "")
+
+ if ansible_config:
+ ansible.env[CFG_OVERRIDE] = ansible_config
+
+ if galaxy_cfg:
+ ansible_galaxy(galaxy_cfg, ansible)
+
if pull_cfg:
- ansible: AnsiblePull
- if install == "pip":
- ansible = AnsiblePullPip()
- else:
- ansible = AnsiblePullDistro(cloud.distro)
- ansible.install(ansible_cfg["package-name"])
- ansible.check_deps()
run_ansible_pull(ansible, deepcopy(pull_cfg))
+ if setup_controller:
+ ansible_controller(setup_controller, ansible)
+
def validate_config(cfg: dict):
- required_keys = {
- "install-method",
- "package-name",
- "pull/url",
- "pull/playbook-name",
- }
+ required_keys = (
+ "install_method",
+ "package_name",
+ )
for key in required_keys:
if not get_cfg_by_path(cfg, key):
- raise ValueError(f"Invalid value config key: '{key}'")
+ raise ValueError(f"Missing required key '{key}' from {cfg}")
+ if cfg.get("pull"):
+ for key in "pull/url", "pull/playbook_name":
+ if not get_cfg_by_path(cfg, key):
+ raise ValueError(f"Missing required key '{key}' from {cfg}")
- install = cfg["install-method"]
+ controller_cfg = cfg.get("setup_controller")
+ if controller_cfg:
+ if not any(
+ [
+ controller_cfg.get("repositories"),
+ controller_cfg.get("run_ansible"),
+ ]
+ ):
+ raise ValueError(f"Missing required key from {controller_cfg}")
+
+ install = cfg["install_method"]
if install not in ("pip", "distro"):
raise ValueError("Invalid install method {install}")
def filter_args(cfg: dict) -> dict:
"""remove boolean false values"""
- return {key: value for (key, value) in cfg.items() if value is not False}
+ return {
+ key.replace("_", "-"): value
+ for (key, value) in cfg.items()
+ if value is not False
+ }
def run_ansible_pull(pull: AnsiblePull, cfg: dict):
- playbook_name: str = cfg.pop("playbook-name")
+ playbook_name: str = cfg.pop("playbook_name")
v = pull.get_version()
if not v:
@@ -186,3 +253,28 @@ def run_ansible_pull(pull: AnsiblePull, cfg: dict):
)
if stdout:
sys.stdout.write(f"{stdout}")
+
+
+def ansible_galaxy(cfg: dict, ansible: AnsiblePull):
+ actions = cfg.get("actions", [])
+
+ if not actions:
+ LOG.warning("Invalid config: %s", cfg)
+ for command in actions:
+ ansible.do_as(command)
+
+
+def ansible_controller(cfg: dict, ansible: AnsiblePull):
+ for repository in cfg.get("repositories", []):
+ ansible.do_as(
+ ["git", "clone", repository["source"], repository["path"]]
+ )
+ for args in cfg.get("run_ansible", []):
+ playbook_dir = args.pop("playbook_dir")
+ playbook_name = args.pop("playbook_name")
+ command = [
+ "ansible-playbook",
+ playbook_name,
+ *[f"--{key}={value}" for key, value in filter_args(args).items()],
+ ]
+ ansible.do_as(command, cwd=playbook_dir)
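
The heart of the cc_ansible refactor is the do_as dispatch: when run_user is set, commands are delegated to the distro's per-user helper, and otherwise they run directly. A standalone sketch of that dispatch, using subprocess and sudo as stand-ins for cloud-init's subp and Distro.do_as:

    import subprocess
    from typing import Optional

    def run_as(command: list, run_user: Optional[str] = None, **kwargs):
        # No run_user: run the command directly, as AnsiblePull.subp does.
        if not run_user:
            return subprocess.run(command, check=True, **kwargs)
        # Otherwise delegate to a per-user wrapper (sudo stands in for
        # the distro's do_as implementation).
        return subprocess.run(
            ["sudo", "-u", run_user, *command], check=True, **kwargs
        )

    # e.g. run_as(["ansible-pull", "--version"], run_user="ubuntu")
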
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
index 0fd7d229..07a7fa85 100644
--- a/cloudinit/config/cc_apk_configure.py
+++ b/cloudinit/config/cc_apk_configure.py
@@ -6,10 +6,13 @@
"""Apk Configure: Configures apk repositories file."""
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import temp_utils, templater, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -106,7 +109,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
"""
Call to handle apk_repos sections in cloud-config file.
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 9d39c918..98957f8d 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -12,11 +12,14 @@ import glob
import os
import pathlib
import re
+from logging import Logger
from textwrap import dedent
from cloudinit import gpg
from cloudinit import log as logging
from cloudinit import subp, templater, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -114,6 +117,13 @@ meta: MetaSchema = {
key: |
------BEGIN PGP PUBLIC KEY BLOCK-------
<key data>
+ ------END PGP PUBLIC KEY BLOCK-------
+ source4:
+ source: 'deb $MIRROR $RELEASE multiverse'
+ append: false
+ key: |
+ ------BEGIN PGP PUBLIC KEY BLOCK-------
+ <key data>
------END PGP PUBLIC KEY BLOCK-------"""
)
],
@@ -160,7 +170,9 @@ def get_default_mirrors(arch=None, target=None):
raise ValueError("No default mirror known for arch %s" % arch)
-def handle(name, ocfg, cloud, log, _):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
"""process the config for apt_config. This can be called from
curthooks if a global apt config was provided or via the "apt"
standalone command."""
@@ -170,18 +182,18 @@ def handle(name, ocfg, cloud, log, _):
global LOG
LOG = log
# feed back converted config, but only work on the subset under 'apt'
- ocfg = convert_to_v3_apt_format(ocfg)
- cfg = ocfg.get("apt", {})
+ cfg = convert_to_v3_apt_format(cfg)
+ apt_cfg = cfg.get("apt", {})
- if not isinstance(cfg, dict):
+ if not isinstance(apt_cfg, dict):
raise ValueError(
"Expected dictionary for 'apt' config, found {config_type}".format(
- config_type=type(cfg)
+ config_type=type(apt_cfg)
)
)
- apply_debconf_selections(cfg, target)
- apply_apt(cfg, cloud, target)
+ apply_debconf_selections(apt_cfg, target)
+ apply_apt(apt_cfg, cloud, target)
def _should_configure_on_empty_apt():
@@ -584,7 +596,12 @@ def add_apt_sources(
sourcefn = subp.target_path(target, ent["filename"])
try:
contents = "%s\n" % (source)
- util.write_file(sourcefn, contents, omode="a")
+ omode = "a"
+
+ if "append" in ent and not ent["append"]:
+ omode = "w"
+
+ util.write_file(sourcefn, contents, omode=omode)
except IOError as detail:
LOG.exception("failed write to file %s: %s", sourcefn, detail)
raise
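
The new ``append`` key above reduces to a choice of open mode for the sources file. A sketch of the same decision with the standard open() modes, assuming util.write_file's omode behaves like open()'s mode argument:

    def write_source(path: str, contents: str, ent: dict) -> None:
        omode = "a"  # default: append to an existing sources file
        if "append" in ent and not ent["append"]:
            omode = "w"  # append: false -> overwrite the file instead
        with open(path, omode) as f:
            f.write(contents)

    # write_source("/etc/apt/sources.list.d/s4.list", "deb ...\n",
    #              {"append": False})  # replaces any prior contents
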
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 82a8e6e0..6a9ace9c 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -6,9 +6,12 @@
"""Apt Pipelining: configure apt pipelining."""
+from logging import Logger
from textwrap import dedent
from cloudinit import util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -56,7 +59,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(_name, cfg, _cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
apt_pipe_value = cfg.get("apt_pipelining", "os")
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 4ee79859..48cd21cc 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -10,9 +10,12 @@
"""Bootcmd: run arbitrary commands early in the boot process."""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, temp_utils, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
@@ -60,7 +63,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if "bootcmd" not in cfg:
log.debug(
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index e48fce34..681936b4 100644
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -8,7 +8,11 @@
"""Byobu: Enable/disable byobu system wide and for default user."""
+from logging import Logger
+
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ug_util
from cloudinit.settings import PER_INSTANCE
@@ -49,7 +53,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if len(args) != 0:
value = args[0]
else:
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 6c9c7ab4..302a67a4 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -5,9 +5,12 @@
"""CA Certs: Add ca certificates."""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -160,7 +163,9 @@ def remove_default_ca_certs(distro_name, distro_cfg):
subp.subp(("debconf-set-selections", "-"), debconf_sel)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
"""
Call to handle ca-cert sections in cloud-config file.
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 5ab2b401..5a809230 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -11,10 +11,14 @@
import itertools
import json
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, temp_utils, templater, url_helper, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.distros import Distro
from cloudinit.settings import PER_ALWAYS
RUBY_VERSION_DEFAULT = "1.8"
@@ -185,7 +189,9 @@ def get_template_params(iid, chef_cfg, log):
return params
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
"""Handler method activated by cloud-init."""
# If there isn't a chef key in the configuration don't do anything
@@ -289,32 +295,28 @@ def run_chef(chef_cfg, log):
subp.subp(cmd, capture=False)
-def subp_blob_in_tempfile(blob, *args, **kwargs):
+def subp_blob_in_tempfile(blob, distro: Distro, args: list, **kwargs):
"""Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
'basename' as a kwarg allows providing the basename for the file.
The 'args' argument to subp will be updated with the full path to the
filename as the first argument.
"""
+ args = args.copy()
basename = kwargs.pop("basename", "subp_blob")
-
- if len(args) == 0 and "args" not in kwargs:
- args = [tuple()]
-
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
- with temp_utils.tempdir(needs_exe=True) as tmpd:
+ with temp_utils.tempdir(
+ dir=distro.get_tmp_exec_path(), needs_exe=True
+ ) as tmpd:
tmpf = os.path.join(tmpd, basename)
- if "args" in kwargs:
- kwargs["args"] = [tmpf] + list(kwargs["args"])
- else:
- args = list(args)
- args[0] = [tmpf] + args[0]
-
+ args.insert(0, tmpf)
util.write_file(tmpf, blob, mode=0o700)
- return subp.subp(*args, **kwargs)
+ return subp.subp(args=args, **kwargs)
-def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
+def install_chef_from_omnibus(
+ distro: Distro, url=None, retries=None, omnibus_version=None
+):
"""Install an omnibus unified package from url.
@param url: URL where blob of chef content may be downloaded. Defaults to
@@ -335,11 +337,15 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
args = ["-v", omnibus_version]
content = url_helper.readurl(url=url, retries=retries).contents
return subp_blob_in_tempfile(
- blob=content, args=args, basename="chef-omnibus-install", capture=False
+ distro=distro,
+ blob=content,
+ args=args,
+ basename="chef-omnibus-install",
+ capture=False,
)
-def install_chef(cloud, chef_cfg, log):
+def install_chef(cloud: Cloud, chef_cfg, log):
# If chef is not installed, we install chef based on 'install_type'
install_type = util.get_cfg_option_str(
chef_cfg, "install_type", "packages"
@@ -361,6 +367,7 @@ def install_chef(cloud, chef_cfg, log):
elif install_type == "omnibus":
omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
install_chef_from_omnibus(
+ distro=cloud.distro,
url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
omnibus_version=omnibus_version,
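
subp_blob_in_tempfile now takes an explicit args list and an exec-capable temp directory from the distro. A self-contained sketch of the same flow using only the standard library, with TemporaryDirectory standing in for temp_utils.tempdir(dir=distro.get_tmp_exec_path(), needs_exe=True):

    import os
    import subprocess
    import tempfile

    def run_blob(blob: str, args: list, basename: str = "subp_blob"):
        args = args.copy()  # do not mutate the caller's list
        # tmpdir over tmpfile to avoid 'text file busy' on execute
        with tempfile.TemporaryDirectory() as tmpd:
            tmpf = os.path.join(tmpd, basename)
            with open(tmpf, "w") as f:
                f.write(blob)
            os.chmod(tmpf, 0o700)
            args.insert(0, tmpf)  # the script path becomes argv[0]
            return subprocess.run(args, check=True)
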
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index a7832e25..7439b89b 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -8,9 +8,12 @@
"""Disable EC2 Metadata: Disable AWS EC2 metadata."""
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
@@ -37,7 +40,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, _cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 182e9401..71d52d3d 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -10,9 +10,12 @@
import logging
import os
import shlex
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -36,6 +39,11 @@ This module is able to configure simple partition tables and filesystems.
for more detail about configuration options for disk setup, see the disk
setup example
+.. note::
+ If a swap partition is being created via ``disk_setup``, then an
+ ``fs_setup`` entry is also needed in order for mkswap to be run;
+ otherwise, when swap activation is later attempted, it will fail.
+
For convenience, aliases can be specified for disks using the
``device_aliases`` config key, which takes a dictionary of alias: path
mappings. There are automatic aliases for ``swap`` and ``ephemeral<X>``, where
@@ -62,11 +70,16 @@ meta: MetaSchema = {
"""\
device_aliases:
my_alias: /dev/sdb
+ swap_disk: /dev/sdc
disk_setup:
my_alias:
table_type: gpt
layout: [50, 50]
overwrite: true
+ swap_disk:
+ table_type: gpt
+ layout: [[100, 82]]
+ overwrite: true
fs_setup:
- label: fs1
filesystem: ext4
@@ -75,9 +88,13 @@ meta: MetaSchema = {
- label: fs2
device: my_alias.2
filesystem: ext4
+ - label: swap
+ device: swap_disk.1
+ filesystem: swap
mounts:
- ["my_alias.1", "/mnt1"]
- ["my_alias.2", "/mnt2"]
+ - ["swap_disk.1", "none", "swap", "sw", "0", "0"]
"""
)
],
@@ -87,7 +104,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(_name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
"""
See doc/examples/cloud-config-disk-setup.txt for documentation on the
format.
@@ -1027,6 +1046,7 @@ def mkfs(fs_cfg):
# Find the mkfs command
mkfs_cmd = subp.which("mkfs.%s" % fs_type)
if not mkfs_cmd:
+ # for "mkswap"
mkfs_cmd = subp.which("mk%s" % fs_type)
if not mkfs_cmd:
@@ -1037,7 +1057,7 @@ def mkfs(fs_cfg):
)
return
- fs_cmd = [mkfs_cmd, device]
+ fs_cmd = [mkfs_cmd]
if label:
fs_cmd.extend(["-L", label])
@@ -1052,6 +1072,8 @@ def mkfs(fs_cfg):
if fs_opts:
fs_cmd.extend(fs_opts)
+ fs_cmd.append(device)
+
LOG.debug("Creating file system %s on %s", label, device)
LOG.debug(" Using cmd: %s", str(fs_cmd))
try:
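
The reordering above matters because mkswap, like the mkfs family, expects options before the target device. A small sketch of the resulting command construction:

    def build_mkfs_cmd(mkfs_cmd: str, device: str, label=None, fs_opts=None):
        cmd = [mkfs_cmd]
        if label:
            cmd.extend(["-L", label])
        if fs_opts:
            cmd.extend(fs_opts)
        cmd.append(device)  # the device now comes last
        return cmd

    # build_mkfs_cmd("/sbin/mkswap", "/dev/sdc1", label="swap")
    # -> ['/sbin/mkswap', '-L', 'swap', '/dev/sdc1']
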
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 094baa09..ae211f31 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -5,10 +5,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Fan: Configure ubuntu fan networking"""
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -84,7 +87,9 @@ def stop_update_start(distro, service, config_file, content):
distro.manage_service("enable", service)
-def handle(name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
cfgin = cfg.get("fan")
if not cfgin:
cfgin = {}
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index c44f021f..d773afb1 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -7,9 +7,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Final Message: Output final message when cloud-init has finished"""
+from logging import Logger
from textwrap import dedent
from cloudinit import templater, util, version
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
@@ -23,7 +26,12 @@ specified as a jinja template with the following variables set:
- ``datasource``: cloud-init data source
- ``uptime``: system uptime
-Upon exit, this module writes ``/var/lib/cloud/instance/boot-finished``.
+This message is written to the cloud-init log (usually /var/log/cloud-init.log)
+as well as stderr (which usually redirects to /var/log/cloud-init-output.log).
+
+Upon exit, this module writes the system uptime, timestamp, and cloud-init
+version to ``/var/lib/cloud/instance/boot-finished`` independent of any
+user data specified for this module.
"""
frequency = PER_ALWAYS
meta: MetaSchema = {
@@ -58,7 +66,9 @@ FINAL_MESSAGE_DEF = (
)
-def handle(_name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
msg_in = ""
if len(args) != 0:
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index e3ba0e9a..f3acbe2a 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -14,15 +14,19 @@ import os
import os.path
import re
import stat
+from abc import ABC, abstractmethod
from contextlib import suppress
+from logging import Logger
from pathlib import Path
from textwrap import dedent
from typing import Tuple
from cloudinit import log as logging
from cloudinit import subp, temp_utils, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
-from cloudinit.distros import ALL_DISTROS
+from cloudinit.distros import ALL_DISTROS, Distro
from cloudinit.settings import PER_ALWAYS
MODULE_DESCRIPTION = """\
@@ -95,7 +99,7 @@ DEFAULT_CONFIG = {
KEYDATA_PATH = Path("/cc_growpart_keydata")
-class RESIZE(object):
+class RESIZE:
SKIPPED = "SKIPPED"
CHANGED = "CHANGED"
NOCHANGE = "NOCHANGE"
@@ -105,11 +109,11 @@ class RESIZE(object):
LOG = logging.getLogger(__name__)
-def resizer_factory(mode):
+def resizer_factory(mode: str, distro: Distro):
resize_class = None
if mode == "auto":
for (_name, resizer) in RESIZERS:
- cur = resizer()
+ cur = resizer(distro)
if cur.available():
resize_class = cur
break
@@ -125,7 +129,7 @@ def resizer_factory(mode):
if mode not in mmap:
raise TypeError("unknown resize mode %s" % mode)
- mclass = mmap[mode]()
+ mclass = mmap[mode](distro)
if mclass.available():
resize_class = mclass
@@ -139,7 +143,20 @@ class ResizeFailedException(Exception):
pass
-class ResizeGrowPart(object):
+class Resizer(ABC):
+ def __init__(self, distro: Distro):
+ self._distro = distro
+
+ @abstractmethod
+ def available(self) -> bool:
+ ...
+
+ @abstractmethod
+ def resize(self, diskdev, partnum, partdev):
+ ...
+
+
+class ResizeGrowPart(Resizer):
def available(self):
myenv = os.environ.copy()
myenv["LANG"] = "C"
@@ -160,7 +177,8 @@ class ResizeGrowPart(object):
# growpart uses tmp dir to store intermediate states
# and may conflict with systemd-tmpfiles-clean
- with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmp_dir = self._distro.get_tmp_exec_path()
+ with temp_utils.tempdir(dir=tmp_dir, needs_exe=True) as tmpd:
growpart_tmp = os.path.join(tmpd, "growpart")
if not os.path.exists(growpart_tmp):
os.mkdir(growpart_tmp, 0o700)
@@ -189,7 +207,7 @@ class ResizeGrowPart(object):
return (before, get_size(partdev))
-class ResizeGpart(object):
+class ResizeGpart(Resizer):
def available(self):
myenv = os.environ.copy()
myenv["LANG"] = "C"
@@ -548,7 +566,9 @@ def resize_devices(resizer, devices):
return info
-def handle(_name, cfg, _cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if "growpart" not in cfg:
log.debug(
"No 'growpart' entry in cfg. Using default: %s" % DEFAULT_CONFIG
@@ -582,7 +602,7 @@ def handle(_name, cfg, _cloud, log, _args):
return
try:
- resizer = resizer_factory(mode)
+ resizer = resizer_factory(mode, cloud.distro)
except (ValueError, TypeError) as e:
log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
if mode != "auto":
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index f2fa6985..893204fa 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -9,19 +9,21 @@
"""Grub Dpkg: Configure grub debconf installation device"""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import ProcessExecutionError
MODULE_DESCRIPTION = """\
Configure which device is used as the target for grub installation. This module
-should work correctly by default without any user configuration. It can be
-enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` config
-dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``. If no
-installation device is specified this module will execute grub-probe to
+can be enabled/disabled using the ``enabled`` config key in the ``grub_dpkg``
+config dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``.
+If no installation device is specified, this module will execute grub-probe to
determine which disk the /boot directory is associated with.
The value which is placed into the debconf database is in the format which the
@@ -115,7 +117,9 @@ def fetch_idevs(log):
return idevs
-def handle(name, cfg, _cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
if not mycfg:
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
index e29b58b9..b95b8a4c 100644
--- a/cloudinit/config/cc_install_hotplug.py
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -1,9 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Install hotplug udev rules if supported and enabled"""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import stages, subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.event import EventScope, EventType
@@ -65,7 +68,9 @@ LABEL="cloudinit_end"
"""
-def handle(_name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
network_hotplug_enabled = (
"updates" in cfg
and "network" in cfg["updates"]
diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py
index e44b8648..f6075e63 100644
--- a/cloudinit/config/cc_keyboard.py
+++ b/cloudinit/config/cc_keyboard.py
@@ -6,10 +6,13 @@
"""keyboard: set keyboard layout"""
+from logging import Logger
from textwrap import dedent
from cloudinit import distros
from cloudinit import log as logging
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -57,7 +60,9 @@ __doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
-def handle(name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if "keyboard" not in cfg:
LOG.debug(
"Skipping module named %s, no 'keyboard' section found", name
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 115df520..649c0abb 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -9,9 +9,12 @@
"""Keys to Console: Control which SSH host keys may be written to console"""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -74,7 +77,9 @@ def _get_helper_tool_path(distro):
return HELPER_TOOL_TPL % base_lib
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)):
log.debug(
"Skipping module named %s, logging of SSH host keys disabled", name
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 2607b866..8abb4c5f 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -10,11 +10,14 @@
import os
from io import BytesIO
+from logging import Logger
from textwrap import dedent
from configobj import ConfigObj
from cloudinit import subp, type_utils, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -97,7 +100,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(_name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
"""
Basically turn a top level 'landscape' entry with a 'client' dict
and render it to ConfigObj format under '[client]' section in
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index dd7fda38..4a53e765 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -8,9 +8,12 @@
"""Locale: set system locale"""
+from logging import Logger
from textwrap import dedent
from cloudinit import util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -48,7 +51,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if len(args) != 0:
locale = args[0]
else:
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 490533c0..e692fbd5 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -12,8 +12,9 @@ from textwrap import dedent
from typing import List, Tuple
from cloudinit import log as logging
-from cloudinit import subp, util
+from cloudinit import safeyaml, subp, util
from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -49,6 +50,7 @@ meta: MetaSchema = {
),
dedent(
"""\
+ # LXD init showcasing cloud-init's LXD config options
lxd:
init:
network_address: 0.0.0.0
@@ -72,6 +74,84 @@ meta: MetaSchema = {
domain: lxd
"""
),
+ dedent(
+ """\
+ # For more complex non-interactive LXD configuration of networks,
+ # storage_pools, profiles, projects, clusters and core config,
+ # `lxd:preseed` config will be passed as stdin to the command:
+ # lxd init --preseed
+ # See https://linuxcontainers.org/lxd/docs/master/preseed/ or
+ # run `lxd init --dump` to see the viable preseed YAML.
+ #
+ # Preseed settings configuring the LXD daemon for HTTPS connections
+ # on 192.168.1.1 port 9999, a nested profile which allows for
+ # LXD nesting on containers, and a limited project allowing for an
+ # RBAC approach when defining behavior for sub projects.
+ lxd:
+ preseed: |
+ config:
+ core.https_address: 192.168.1.1:9999
+ networks:
+ - config:
+ ipv4.address: 10.42.42.1/24
+ ipv4.nat: true
+ ipv6.address: fd42:4242:4242:4242::1/64
+ ipv6.nat: true
+ description: ""
+ name: lxdbr0
+ type: bridge
+ project: default
+ storage_pools:
+ - config:
+ size: 5GiB
+ source: /var/snap/lxd/common/lxd/disks/default.img
+ description: ""
+ name: default
+ driver: zfs
+ profiles:
+ - config: {}
+ description: Default LXD profile
+ devices:
+ eth0:
+ name: eth0
+ network: lxdbr0
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
+ name: default
+ - config:
+ security.nesting: true
+ devices:
+ eth0:
+ name: eth0
+ network: lxdbr0
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
+ name: nested
+ projects:
+ - config:
+ features.images: true
+ features.networks: true
+ features.profiles: true
+ features.storage.volumes: true
+ description: Default LXD project
+ name: default
+ - config:
+ features.images: false
+ features.networks: true
+ features.profiles: false
+ features.storage.volumes: false
+ description: Limited Access LXD project
+ name: limited
+
+
+ """
+ ),
],
"frequency": PER_INSTANCE,
"activate_by_schema_keys": ["lxd"],
@@ -80,7 +160,46 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud: Cloud, log: Logger, args):
+def supplemental_schema_validation(
+ init_cfg: dict, bridge_cfg: dict, preseed_str: str
+):
+ """Validate user-provided lxd network and bridge config option values.
+
+ @raises: ValueError describing invalid values provided.
+ """
+ errors = []
+ if not isinstance(init_cfg, dict):
+ errors.append(
+ f"lxd.init config must be a dictionary. found a"
+ f" '{type(init_cfg).__name__}'",
+ )
+
+ if not isinstance(bridge_cfg, dict):
+ errors.append(
+ f"lxd.bridge config must be a dictionary. found a"
+ f" '{type(bridge_cfg).__name__}'",
+ )
+
+ if not isinstance(preseed_str, str):
+ errors.append(
+ f"lxd.preseed config must be a string. found a"
+ f" '{type(preseed_str).__name__}'",
+ )
+ if preseed_str and (init_cfg or bridge_cfg):
+ incompat_cfg = ["lxd.init"] if init_cfg else []
+ incompat_cfg += ["lxd.bridge"] if bridge_cfg else []
+
+ errors.append(
+ "Unable to configure LXD. lxd.preseed config can not be provided"
+ f" with key(s): {', '.join(incompat_cfg)}"
+ )
+ if errors:
+ raise ValueError(". ".join(errors))
+
+
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# Get config
lxd_cfg = cfg.get("lxd")
if not lxd_cfg:
@@ -89,28 +208,18 @@ def handle(name, cfg, cloud: Cloud, log: Logger, args):
)
return
if not isinstance(lxd_cfg, dict):
- log.warning(
- "lxd config must be a dictionary. found a '%s'", type(lxd_cfg)
+ raise ValueError(
+ f"lxd config must be a dictionary. found a"
+ f" '{type(lxd_cfg).__name__}'"
)
- return
# Grab the configuration
- init_cfg = lxd_cfg.get("init")
- if not isinstance(init_cfg, dict):
- log.warning(
- "lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg),
- )
- init_cfg = {}
-
+ init_cfg = lxd_cfg.get("init", {})
+ preseed_str = lxd_cfg.get("preseed", "")
bridge_cfg = lxd_cfg.get("bridge", {})
- if not isinstance(bridge_cfg, dict):
- log.warning(
- "lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg),
- )
- bridge_cfg = {}
- packages = get_required_packages(init_cfg)
+ supplemental_schema_validation(init_cfg, bridge_cfg, preseed_str)
+
+ packages = get_required_packages(init_cfg, preseed_str)
if len(packages):
try:
cloud.distro.install_packages(packages)
@@ -118,6 +227,10 @@ def handle(name, cfg, cloud: Cloud, log: Logger, args):
log.warning("failed to install packages %s: %s", packages, exc)
return
+ subp.subp(["lxd", "waitready", "--timeout=300"])
+ if preseed_str:
+ subp.subp(["lxd", "init", "--preseed"], data=preseed_str)
+ return
# Set up lxd if init config is given
if init_cfg:
@@ -133,8 +246,6 @@ def handle(name, cfg, cloud: Cloud, log: Logger, args):
"trust_password",
)
- subp.subp(["lxd", "waitready", "--timeout=300"])
-
# Bug https://bugs.launchpad.net/ubuntu/+source/linux-kvm/+bug/1982780
kernel = util.system_info()["uname"][2]
if init_cfg["storage_backend"] == "lvm" and not os.path.exists(
@@ -385,7 +496,7 @@ def maybe_cleanup_default(
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
-def get_required_packages(cfg: dict) -> List[str]:
+def get_required_packages(init_cfg: dict, preseed_str: str) -> List[str]:
"""identify required packages for install"""
packages = []
if not subp.which("lxd"):
@@ -393,12 +504,27 @@ def get_required_packages(cfg: dict) -> List[str]:
# binary for pool creation must be available for the requested backend:
# zfs, lvcreate, mkfs.btrfs
- storage: str = cfg.get("storage_backend", "")
- if storage:
- if storage == "zfs" and not subp.which("zfs"):
- packages.append("zfsutils-linux")
- if storage == "lvm" and not subp.which("lvcreate"):
- packages.append("lvm2")
- if storage == "btrfs" and not subp.which("mkfs.btrfs"):
- packages.append("btrfs-progs")
+ storage_drivers: List[str] = []
+ preseed_cfg: dict = {}
+ if "storage_backend" in init_cfg:
+ storage_drivers.append(init_cfg["storage_backend"])
+ if preseed_str and "storage_pools" in preseed_str:
+ # Assume correct YAML preseed format
+ try:
+ preseed_cfg = safeyaml.load(preseed_str)
+ except (safeyaml.YAMLError, TypeError, ValueError):
+ LOG.warning(
+ "lxd.preseed string value is not YAML. "
+ " Unable to determine required storage driver packages to"
+ " support storage_pools config."
+ )
+ for storage_pool in preseed_cfg.get("storage_pools", []):
+ if storage_pool.get("driver"):
+ storage_drivers.append(storage_pool["driver"])
+ if "zfs" in storage_drivers and not subp.which("zfs"):
+ packages.append("zfsutils-linux")
+ if "lvm" in storage_drivers and not subp.which("lvcreate"):
+ packages.append("lvm2")
+ if "btrfs" in storage_drivers and not subp.which("mkfs.btrfs"):
+ packages.append("btrfs-progs")
return packages
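
get_required_packages above boils down to a driver-to-package map consulted for drivers named in either lxd.init or the preseed storage pools. A compact sketch of that resolution, with shutil.which standing in for subp.which:

    import shutil
    from typing import List

    DRIVER_TOOLS = {  # storage driver -> (binary to probe, package)
        "zfs": ("zfs", "zfsutils-linux"),
        "lvm": ("lvcreate", "lvm2"),
        "btrfs": ("mkfs.btrfs", "btrfs-progs"),
    }

    def packages_for(storage_drivers: List[str]) -> List[str]:
        packages = []
        for driver, (binary, package) in DRIVER_TOOLS.items():
            if driver in storage_drivers and not shutil.which(binary):
                packages.append(package)
        return packages

    # packages_for(["zfs"]) -> ["zfsutils-linux"] on a host without zfs
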
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index f4fd456e..7d75078d 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -11,6 +11,7 @@
import errno
import io
+from logging import Logger
from textwrap import dedent
# Used since this can maintain comments
@@ -19,6 +20,8 @@ from configobj import ConfigObj
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -149,7 +152,9 @@ def configure(
util.write_file(server_cfg, contents.getvalue(), mode=0o644)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# If there isn't a mcollective key in the configuration don't do anything
if "mcollective" not in cfg:
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index f1cd788a..956a9478 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -8,8 +8,11 @@
import os
import shutil
+from logging import Logger
from cloudinit import helpers, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
@@ -86,7 +89,9 @@ def _migrate_legacy_sems(cloud, log):
pass
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
do_migrate = util.get_cfg_option_str(cfg, "migrate", True)
if not util.translate_bool(do_migrate):
log.debug("Skipping module named %s, migration disabled", name)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 843ea5eb..db7a7c26 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -12,10 +12,13 @@ import logging
import math
import os
import re
+from logging import Logger
from string import whitespace
from textwrap import dedent
from cloudinit import subp, type_utils, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -404,7 +407,9 @@ def handle_swapcfg(swapcfg):
return None
-def handle(_name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
def_mnt_opts = "defaults,nobootwait"
uses_systemd = cloud.distro.uses_systemd()
@@ -417,7 +422,7 @@ def handle(_name, cfg, cloud, log, _args):
defvals = cfg.get("mount_default_fields", defvals)
# these are our default set of mounts
- defmnts = [
+ defmnts: list = [
["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
["swap", "none", "swap", "sw", "0", "0"],
]
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 7974f5b2..8ecc4eb8 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -8,10 +8,13 @@
import copy
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, temp_utils, templater, type_utils, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -25,10 +28,14 @@ distros = [
"alpine",
"centos",
"cloudlinux",
+ "cos",
"debian",
"eurolinux",
"fedora",
+ "freebsd",
+ "mariner",
"miraclelinux",
+ "openbsd",
"openEuler",
"openmandriva",
"opensuse",
@@ -65,6 +72,14 @@ NTP_CLIENT_CONFIG = {
"template_name": "ntp.conf.{distro}",
"template": None,
},
+ "openntpd": {
+ "check_exe": "ntpd",
+ "confpath": "/etc/ntpd.conf",
+ "packages": [],
+ "service_name": "ntpd",
+ "template_name": "ntpd.conf.{distro}",
+ "template": None,
+ },
"systemd-timesyncd": {
"check_exe": "/lib/systemd/systemd-timesyncd",
"confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf",
@@ -96,11 +111,49 @@ DISTRO_CLIENT_CONFIG = {
"service_name": "chronyd",
},
},
+ "cos": {
+ "chrony": {
+ "service_name": "chronyd",
+ "confpath": "/etc/chrony/chrony.conf",
+ },
+ },
"debian": {
"chrony": {
"confpath": "/etc/chrony/chrony.conf",
},
},
+ "freebsd": {
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "service_name": "ntpd",
+ "template_name": "ntp.conf.{distro}",
+ },
+ "chrony": {
+ "confpath": "/usr/local/etc/chrony.conf",
+ "packages": ["chrony"],
+ "service_name": "chronyd",
+ "template_name": "chrony.conf.{distro}",
+ },
+ "openntpd": {
+ "check_exe": "/usr/local/sbin/ntpd",
+ "confpath": "/usr/local/etc/ntp.conf",
+ "packages": ["openntpd"],
+ "service_name": "openntpd",
+ "template_name": "ntpd.conf.openbsd",
+ },
+ },
+ "mariner": {
+ "chrony": {
+ "service_name": "chronyd",
+ },
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
+ "confpath": "/etc/systemd/timesyncd.conf",
+ },
+ },
+ "openbsd": {
+ "openntpd": {},
+ },
"openmandriva": {
"chrony": {
"service_name": "chronyd",
@@ -387,6 +440,8 @@ def write_ntp_config_template(
if not pools:
pools = []
+ if len(servers) == 0 and len(pools) == 0 and distro_name == "cos":
+ return
if (
len(servers) == 0
and distro_name == "alpine"
@@ -481,7 +536,9 @@ def supplemental_schema_validation(ntp_config):
)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
"""Enable and configure ntp."""
if "ntp" not in cfg:
LOG.debug(
@@ -545,6 +602,24 @@ def handle(name, cfg, cloud, log, _args):
packages=ntp_client_config["packages"],
check_exe=ntp_client_config["check_exe"],
)
+ if util.is_BSD():
+ if ntp_client_config.get("service_name") != "ntpd":
+ try:
+ cloud.distro.manage_service("stop", "ntpd")
+ except subp.ProcessExecutionError:
+ LOG.warning("Failed to stop base ntpd service")
+ try:
+ cloud.distro.manage_service("disable", "ntpd")
+ except subp.ProcessExecutionError:
+ LOG.warning("Failed to disable base ntpd service")
+
+ try:
+ cloud.distro.manage_service(
+ "enable", ntp_client_config.get("service_name")
+ )
+ except subp.ProcessExecutionError as e:
+ LOG.exception("Failed to enable ntp service: %s", e)
+ raise
try:
cloud.distro.manage_service(
"reload", ntp_client_config.get("service_name")
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index a8a3e9ff..7d346a19 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -8,10 +8,13 @@
import os
import time
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -82,7 +85,9 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
)
-def handle(_name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# Handle the old style + new config names
update = _multi_cfg_bool_get(cfg, "apt_update", "package_update")
upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade")
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index dee30e96..7bbee5af 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -8,9 +8,12 @@
"""Phone Home: Post data to url"""
+from logging import Logger
from textwrap import dedent
from cloudinit import templater, url_helper, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -105,7 +108,9 @@ __doc__ = get_meta_doc(meta)
#
-def handle(name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
@@ -130,7 +135,7 @@ def handle(name, cfg, cloud, log, args):
post_list = ph_cfg.get("post", "all")
tries = ph_cfg.get("tries")
try:
- tries = int(tries) # pyright: ignore
+ tries = int(tries) # type: ignore
except (ValueError, TypeError):
tries = 10
util.logexc(
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 39459bfe..1eb63d78 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -11,9 +11,12 @@ import os
import re
import subprocess
import time
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -127,7 +130,9 @@ def check_condition(cond, log=None):
return False
-def handle(_name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
try:
(args, timeout, condition) = load_power_state(cfg, cloud.distro)
if args is None:
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 14467e36..b8a9fe17 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -11,13 +11,16 @@
import os
import socket
from io import StringIO
+from logging import Logger
from textwrap import dedent
import yaml
from cloudinit import helpers, subp, temp_utils, url_helper, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
-from cloudinit.distros import ALL_DISTROS
+from cloudinit.distros import ALL_DISTROS, Distro
from cloudinit.settings import PER_INSTANCE
AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
@@ -104,7 +107,7 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-class PuppetConstants(object):
+class PuppetConstants:
def __init__(
self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log
):
@@ -148,11 +151,16 @@ def get_config_value(puppet_bin, setting):
def install_puppet_aio(
- url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True
+ distro: Distro,
+ url=AIO_INSTALL_URL,
+ version=None,
+ collection=None,
+ cleanup=True,
):
"""Install puppet-agent from the puppetlabs repositories using the one-shot
shell script
+ :param distro: Instance of Distro
:param url: URL from where to download the install script
:param version: version to install, blank defaults to latest
:param collection: collection to install, blank defaults to latest
@@ -170,13 +178,17 @@ def install_puppet_aio(
content = url_helper.readurl(url=url, retries=5).contents
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
- with temp_utils.tempdir(needs_exe=True) as tmpd:
+ with temp_utils.tempdir(
+ dir=distro.get_tmp_exec_path(), needs_exe=True
+ ) as tmpd:
tmpf = os.path.join(tmpd, "puppet-install")
util.write_file(tmpf, content, mode=0o700)
return subp.subp([tmpf] + args, capture=False)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# If there isn't a puppet key in the configuration don't do anything
if "puppet" not in cfg:
log.debug(
@@ -228,7 +240,9 @@ def handle(name, cfg, cloud, log, _args):
if install_type == "packages":
cloud.distro.install_packages((package_name, version))
elif install_type == "aio":
- install_puppet_aio(aio_install_url, version, collection, cleanup)
+ install_puppet_aio(
+ cloud.distro, aio_install_url, version, collection, cleanup
+ )
else:
log.warning("Unknown puppet install type '%s'", install_type)
run = False
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 180a8873..18c22476 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -8,9 +8,12 @@
Ensure Network Manager is not managing IPv6 interface"""
import errno
+from logging import Logger
from cloudinit import log as logging
from cloudinit import netinfo, subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
@@ -54,7 +57,9 @@ LOG = logging.getLogger(__name__)
RMCCTRL = "rmcctrl"
-def handle(name, _cfg, _cloud, _log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if not subp.which(RMCCTRL):
LOG.debug("No '%s' in path, disabled", RMCCTRL)
return
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index 9766c3a4..a780e4ff 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -7,9 +7,12 @@
import os
+from logging import Logger
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -62,7 +65,9 @@ LOG = logging.getLogger(__name__)
NODE_ID_FILE = "/etc/ct_node_id"
-def handle(name, _cfg, cloud, _log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# Ensure the node id is generated only once, during first boot
if cloud.datasource.platform_type == "none":
LOG.debug("Skipping creation of new ct_node_id node")
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 3372208f..7a0ecf96 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -11,9 +11,12 @@
import errno
import os
import stat
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
@@ -207,7 +210,9 @@ def maybe_get_writable_device_path(devpath, info, log):
return devpath # The writable block devpath
-def handle(name, cfg, _cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if len(args) != 0:
resize_root = args[0]
else:
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 545b22c3..8dbed71e 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -8,10 +8,13 @@
"""Resolv Conf: configure resolv.conf"""
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import templater, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -52,7 +55,15 @@ meta: MetaSchema = {
"name": "Resolv Conf",
"title": "Configure resolv.conf",
"description": MODULE_DESCRIPTION,
- "distros": ["alpine", "fedora", "opensuse", "photon", "rhel", "sles"],
+ "distros": [
+ "alpine",
+ "fedora",
+ "mariner",
+ "opensuse",
+ "photon",
+ "rhel",
+ "sles",
+ ],
"frequency": PER_INSTANCE,
"examples": [
dedent(
@@ -104,7 +115,9 @@ def generate_resolv_conf(template_fn, params, target_fname):
templater.render_to_file(template_fn, target_fname, params)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
"""
Handler for resolv.conf
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 9dfe6a38..ce88ec65 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -5,10 +5,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Red Hat Subscription: Register Red Hat Enterprise Linux based system"""
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -77,7 +80,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, _cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
sm = SubscriptionManager(cfg, log=log)
if not sm.is_configured():
log.debug("%s: module not configured.", name)
@@ -135,7 +140,7 @@ class SubscriptionError(Exception):
pass
-class SubscriptionManager(object):
+class SubscriptionManager:
valid_rh_keys = [
"org",
"activation-key",
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 5ebf359f..9e84032a 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -7,10 +7,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+from logging import Logger
from urllib.parse import parse_qs
from cloudinit import url_helper as uhelp
from cloudinit import util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -67,13 +70,15 @@ __doc__ = get_meta_doc(meta)
#
-def handle(name, _cfg, cloud, log, _args):
- try:
- ud = cloud.get_userdata_raw()
- except Exception:
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
+ get_userdata_raw = getattr(cloud, "get_userdata_raw", None)
+ if not get_userdata_raw or not callable(get_userdata_raw):
log.debug("Failed to get raw userdata in module %s", name)
return
+ ud = get_userdata_raw()
try:
mdict = parse_qs(ud)
if not mdict or MY_HOOKNAME not in mdict:
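The cc_rightscale_userdata rewrite above replaces a broad try/except around cloud.get_userdata_raw() with an explicit getattr/callable guard, so only a genuinely missing getter is treated as "no userdata" while errors raised inside the getter still propagate. A minimal standalone sketch of the pattern, using a hypothetical FakeCloud stand-in rather than a real cloud-init Cloud object:

    # Sketch only; FakeCloud is a hypothetical stand-in, not cloud-init code.
    class FakeCloud:
        def get_userdata_raw(self):
            return "CLOUD_INIT_HOOK_URL=http%3A%2F%2Fexample.invalid"

    def fetch_userdata(cloud):
        # Only "attribute missing or not callable" is silenced; exceptions
        # raised inside the getter propagate, unlike the old bare except.
        get_userdata_raw = getattr(cloud, "get_userdata_raw", None)
        if not get_userdata_raw or not callable(get_userdata_raw):
            return None
        return get_userdata_raw()

    print(fetch_userdata(FakeCloud()))  # the raw userdata string
    print(fetch_userdata(object()))     # None: no such attribute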
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 5484691b..9baaf094 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -10,10 +10,13 @@
import os
import re
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -213,7 +216,7 @@ def parse_remotes_line(line, name=None):
return t
-class SyslogRemotesLine(object):
+class SyslogRemotesLine:
def __init__(
self, name=None, match=None, proto=None, addr=None, port=None
):
@@ -294,7 +297,9 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
return "\n".join(lines) + "\n"
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if "rsyslog" not in cfg:
log.debug(
"Skipping module named %s, no 'rsyslog' key in configuration", name
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 60e53298..464198c4 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -9,9 +9,12 @@
"""Runcmd: run arbitrary commands at rc.local with output to the console"""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -73,7 +76,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if "runcmd" not in cfg:
log.debug(
"Skipping module named %s, no 'runcmd' key in configuration", name
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index ebab4e30..f3a8c16c 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -5,9 +5,12 @@
"""Salt Minion: Setup and run salt minion"""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import safeyaml, subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS, bsd_utils
from cloudinit.settings import PER_INSTANCE
@@ -67,7 +70,7 @@ __doc__ = get_meta_doc(meta)
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
-class SaltConstants(object):
+class SaltConstants:
"""
defines default distribution specific salt variables
"""
@@ -95,7 +98,9 @@ class SaltConstants(object):
)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# If there isn't a salt key in the configuration don't do anything
if "salt_minion" not in cfg:
log.debug(
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 408c3bfd..3e093d0e 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -8,8 +8,11 @@
"""Scripts Per Boot: Run per boot scripts"""
import os
+from logging import Logger
from cloudinit import subp
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ALWAYS
@@ -38,7 +41,9 @@ __doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "per-boot"
-def handle(name, _cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index c1360ae6..719b8a2a 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -8,8 +8,11 @@
"""Scripts Per Instance: Run per instance scripts"""
import os
+from logging import Logger
from cloudinit import subp
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -40,7 +43,9 @@ __doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "per-instance"
-def handle(name, _cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index baf2214e..42aa89b3 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -8,8 +8,11 @@
"""Scripts Per Once: Run one time scripts"""
import os
+from logging import Logger
from cloudinit import subp
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_ONCE
@@ -38,7 +41,9 @@ __doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "per-once"
-def handle(name, _cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index ffe610fd..b6ae37f5 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -8,8 +8,11 @@
"""Scripts User: Run user scripts"""
import os
+from logging import Logger
from cloudinit import subp
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -40,7 +43,9 @@ __doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "scripts"
-def handle(name, _cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# This is written to by the user data handlers
# Ie, any custom shell scripts that come down
# go here...
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index 8dc99e1e..b3ee9df1 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -6,9 +6,12 @@
"""Scripts Vendor: Run vendor scripts"""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -61,7 +64,9 @@ __doc__ = get_meta_doc(meta)
SCRIPT_SUBDIR = "vendor"
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# This is written to by the vendor data handlers
# any vendor data shell scripts get placed in runparts_path
runparts_path = os.path.join(
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index f829eaf4..1c1b81d8 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -11,10 +11,13 @@
import base64
import os
from io import BytesIO
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -107,7 +110,9 @@ def handle_random_seed_command(command, required, env=None):
subp.subp(command, env=env, capture=False)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
mycfg = cfg.get("random_seed", {})
seed_path = mycfg.get("file", "/dev/urandom")
seed_data = mycfg.get("data", b"")
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 3ea9e4ed..c0bda6fe 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -8,10 +8,13 @@
"""Set Hostname: Set hostname and FQDN"""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import util
from cloudinit.atomic_helper import write_json
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -76,7 +79,9 @@ class SetHostnameError(Exception):
"""
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
log.debug(
"Configuration option 'preserve_hostname' is set,"
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index fa7de944..539887c5 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -17,6 +17,7 @@ from cloudinit import features
from cloudinit import log as logging
from cloudinit import subp, util
from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS, Distro, ug_util
from cloudinit.settings import PER_INSTANCE
@@ -190,7 +191,9 @@ def handle_ssh_pwauth(pw_auth, distro: Distro):
LOG.debug("Not restarting SSH service: service is stopped.")
-def handle(_name, cfg: dict, cloud: Cloud, log: Logger, args: list):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
distro: Distro = cloud.distro
if args:
# if run from command line, and give args, wipe the chpasswd['list']
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 7bf22a52..3bf25f1e 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -6,10 +6,13 @@
import os
import sys
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
@@ -183,7 +186,9 @@ def run_commands(commands):
raise RuntimeError(msg)
-def handle(name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
cfgin = cfg.get("snap", {})
if not cfgin:
LOG.debug(
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 991aa5ed..add40c1c 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -1,9 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Spacewalk: Install and configure spacewalk"""
+from logging import Logger
from textwrap import dedent
from cloudinit import subp
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -85,7 +88,9 @@ def do_register(
subp.subp(cmd, capture=False)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if "spacewalk" not in cfg:
log.debug(
"Skipping module named %s, no 'spacewalk' key in configuration",
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index ad4fcf80..c9e59d16 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -17,6 +17,7 @@ from typing import List, Optional, Sequence
from cloudinit import ssh_util, subp, util
from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS, ug_util
from cloudinit.settings import PER_INSTANCE
@@ -195,7 +196,9 @@ for k in GENERATE_KEY_NAMES:
KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
-def handle(_name, cfg, cloud: Cloud, log: Logger, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
# remove the static keys from the pristine image
if cfg.get("ssh_deletekeys", True):
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 40fb4ce5..4b4c3d60 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -7,8 +7,11 @@
import base64
import hashlib
+from logging import Logger
from cloudinit import ssh_util, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS, ug_util
from cloudinit.settings import PER_INSTANCE
@@ -112,7 +115,9 @@ def _pprint_key_entries(
)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if util.is_true(cfg.get("no_ssh_fingerprints", False)):
log.debug(
"Skipping module named %s, logging of SSH fingerprints disabled",
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 358c571f..ed5ac492 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -8,15 +8,18 @@
"""SSH Import ID: Import SSH id"""
import pwd
+from logging import Logger
from textwrap import dedent
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ug_util
from cloudinit.settings import PER_INSTANCE
# https://launchpad.net/ssh-import-id
-distros = ["ubuntu", "debian"]
+distros = ["ubuntu", "debian", "cos"]
SSH_IMPORT_ID_BINARY = "ssh-import-id"
MODULE_DESCRIPTION = """\
@@ -49,7 +52,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(_name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if not is_key_in_nested_dict(cfg, "ssh_import_id"):
log.debug(
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index b9df31af..7436adf3 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -7,7 +7,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Timezone: Set the system timezone"""
+from logging import Logger
+
from cloudinit import util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -32,7 +36,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if len(args) != 0:
timezone = args[0]
else:
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index c05d6297..9dd8f3a2 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -2,12 +2,17 @@
"""ubuntu_advantage: Configure Ubuntu Advantage support services"""
+import json
import re
+from logging import Logger
from textwrap import dedent
+from typing import Any, List
from urllib.parse import urlparse
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -24,9 +29,16 @@ meta: MetaSchema = {
Attach machine to an existing Ubuntu Advantage support contract and
enable or disable support services such as Livepatch, ESM,
FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
- one can also specify services to enable. When the 'enable'
- list is present, any named service will supplement the contract-default
- enabled services.
+    one can also specify services to enable. When the 'enable'
+    list is present, only the named services will be activated;
+    if the 'enable' list is not present, the contract's default
+    services will be enabled.
+
+    On Ubuntu Pro instances, when ``ubuntu_advantage`` config is provided
+    to cloud-init, Pro's own auto-attach service will be disabled and
+    cloud-init will perform the auto-attach itself, ignoring the ``token``
+    key. The ``enable`` and ``enable_beta`` values will strictly determine
+    which services will be enabled, ignoring contract defaults.
Note that when enabling FIPS or FIPS updates you will need to schedule
a reboot to ensure the machine is running the FIPS-compliant kernel.
@@ -80,14 +92,40 @@ meta: MetaSchema = {
config:
http_proxy: 'http://some-proxy:8088'
https_proxy: 'https://some-proxy:8088'
- global_apt_https_proxy: 'http://some-global-apt-proxy:8088/'
- global_apt_http_proxy: 'https://some-global-apt-proxy:8088/'
+ global_apt_https_proxy: 'https://some-global-apt-proxy:8088/'
+ global_apt_http_proxy: 'http://some-global-apt-proxy:8088/'
ua_apt_http_proxy: 'http://10.0.10.10:3128'
ua_apt_https_proxy: 'https://10.0.10.10:3128'
enable:
- fips
"""
),
+ dedent(
+ """\
+          # On Ubuntu Pro instances, auto-attach but enable no Pro services.
+ ubuntu_advantage:
+ enable: []
+ enable_beta: []
+ """
+ ),
+ dedent(
+ """\
+ # Enable esm and beta realtime-kernel services in Ubuntu Pro instances.
+ ubuntu_advantage:
+ enable:
+ - esm
+ enable_beta:
+ - realtime-kernel
+ """
+ ),
+ dedent(
+ """\
+ # Disable auto-attach in Ubuntu Pro instances.
+ ubuntu_advantage:
+ features:
+ disable_auto_attach: True
+ """
+ ),
],
"frequency": PER_INSTANCE,
"activate_by_schema_keys": ["ubuntu_advantage", "ubuntu-advantage"],
@@ -96,142 +134,239 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
+REDACTED = "REDACTED"
+ERROR_MSG_SHOULD_AUTO_ATTACH = (
+ "Unable to determine if this is an Ubuntu Pro instance."
+ " Fallback to normal UA attach."
+)
+KNOWN_UA_CONFIG_PROPS = (
+ "http_proxy",
+ "https_proxy",
+ "global_apt_http_proxy",
+ "global_apt_https_proxy",
+ "ua_apt_http_proxy",
+ "ua_apt_https_proxy",
+)
+
+
+def validate_schema_features(ua_section: dict):
+ if "features" not in ua_section:
+ return
+
+ # Validate ubuntu_advantage.features type
+ features = ua_section["features"]
+ if not isinstance(features, dict):
+ msg = (
+ f"'ubuntu_advantage.features' should be a dict, not a"
+ f" {type(features).__name__}"
+ )
+ LOG.error(msg)
+ raise RuntimeError(msg)
+
+ # Validate ubuntu_advantage.features.disable_auto_attach
+ if "disable_auto_attach" not in features:
+ return
+ disable_auto_attach = features["disable_auto_attach"]
+ if not isinstance(disable_auto_attach, bool):
+ msg = (
+ f"'ubuntu_advantage.features.disable_auto_attach' should be a bool"
+ f", not a {type(disable_auto_attach).__name__}"
+ )
+ LOG.error(msg)
+ raise RuntimeError(msg)
-def supplemental_schema_validation(ua_config):
+def supplemental_schema_validation(ua_config: dict):
"""Validate user-provided ua:config option values.
This function supplements flexible jsonschema validation with specific
value checks to aid in triage of invalid user-provided configuration.
+    Note: It does not log or raise config values, as they could be URLs
+    containing sensitive auth info.
+
@param ua_config: Dictionary of config value under 'ubuntu_advantage'.
@raises: ValueError describing invalid values provided.
"""
errors = []
- nl = "\n"
for key, value in sorted(ua_config.items()):
- if key in (
- "http_proxy",
- "https_proxy",
- "global_apt_http_proxy",
- "global_apt_https_proxy",
- "ua_apt_http_proxy",
- "ua_apt_https_proxy",
- ):
- try:
- parsed_url = urlparse(value)
- if parsed_url.scheme not in ("http", "https"):
- errors.append(
- f"Expected URL scheme http/https for ua:config:{key}."
- f" Found: {value}"
- )
- except (AttributeError, ValueError):
+ if key not in KNOWN_UA_CONFIG_PROPS:
+ LOG.warning(
+ "Not validating unknown ubuntu_advantage.config.%s property",
+ key,
+ )
+ continue
+ elif value is None:
+ # key will be unset. No extra validation needed.
+ continue
+ try:
+ parsed_url = urlparse(value)
+ if parsed_url.scheme not in ("http", "https"):
errors.append(
- f"Expected a URL for ua:config:{key}. Found: {value}"
+ f"Expected URL scheme http/https for ua:config:{key}"
)
+ except (AttributeError, ValueError):
+ errors.append(f"Expected a URL for ua:config:{key}")
if errors:
raise ValueError(
- f"Invalid ubuntu_advantage configuration:{nl}{nl.join(errors)}"
+ "Invalid ubuntu_advantage configuration:\n{}".format(
+ "\n".join(errors)
+ )
)
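supplemental_schema_validation above accepts only the known proxy keys and requires an http/https URL for each, while deliberately keeping the values themselves out of logs and error text. A self-contained sketch of the scheme check it relies on (urlparse rarely raises for plain strings, so the scheme test does the real work); the helper name is illustrative, not cloud-init API:

    from typing import Optional
    from urllib.parse import urlparse

    def check_proxy_url(key: str, value: str) -> Optional[str]:
        # Return an error string for a bad proxy URL without echoing the
        # value, mirroring the redaction rationale in the docstring above.
        try:
            parsed = urlparse(value)
        except (AttributeError, ValueError):
            return f"Expected a URL for ua:config:{key}"
        if parsed.scheme not in ("http", "https"):
            return f"Expected URL scheme http/https for ua:config:{key}"
        return None

    print(check_proxy_url("http_proxy", "http://proxy:3128"))  # None: valid
    print(check_proxy_url("http_proxy", "ftp://proxy:21"))     # scheme error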
-def configure_ua(token=None, enable=None, config=None):
- """Call ua commandline client to attach or enable services."""
- error = None
- if not token:
- error = "ubuntu_advantage: token must be provided"
- LOG.error(error)
- raise RuntimeError(error)
-
- if enable is None:
- enable = []
- elif isinstance(enable, str):
- LOG.warning(
- "ubuntu_advantage: enable should be a list, not"
- " a string; treating as a single enable"
- )
- enable = [enable]
- elif not isinstance(enable, list):
- LOG.warning(
- "ubuntu_advantage: enable should be a list, not"
- " a %s; skipping enabling services",
- type(enable).__name__,
- )
- enable = []
-
- if config is None:
- config = dict()
- elif not isinstance(config, dict):
- LOG.warning(
- "ubuntu_advantage: config should be a dict, not"
- " a %s; skipping enabling config parameters",
- type(config).__name__,
+def set_ua_config(ua_config: Any = None):
+ if ua_config is None:
+ return
+ if not isinstance(ua_config, dict):
+ raise RuntimeError(
+ f"ubuntu_advantage: config should be a dict, not"
+ f" a {type(ua_config).__name__};"
+ " skipping enabling config parameters"
)
- config = dict()
+ supplemental_schema_validation(ua_config)
enable_errors = []
-
- # UA Config
- for key, value in sorted(config.items()):
+ for key, value in sorted(ua_config.items()):
+ redacted_key_value = None
+ subp_kwargs: dict = {}
if value is None:
- LOG.debug("Unsetting UA config for %s", key)
+ LOG.debug("Disabling UA config for %s", key)
config_cmd = ["ua", "config", "unset", key]
else:
- LOG.debug("Setting UA config %s=%s", key, value)
+ redacted_key_value = f"{key}=REDACTED"
+ LOG.debug("Enabling UA config %s", redacted_key_value)
if re.search(r"\s", value):
key_value = f"{key}={re.escape(value)}"
else:
key_value = f"{key}={value}"
config_cmd = ["ua", "config", "set", key_value]
-
+ subp_kwargs = {"logstring": config_cmd[:-1] + [redacted_key_value]}
try:
- subp.subp(config_cmd)
+ subp.subp(config_cmd, **subp_kwargs)
except subp.ProcessExecutionError as e:
- enable_errors.append((key, e))
-
+ err_msg = str(e)
+ if redacted_key_value is not None:
+ err_msg = err_msg.replace(value, REDACTED)
+ enable_errors.append((key, err_msg))
if enable_errors:
for param, error in enable_errors:
- LOG.warning('Failure enabling "%s":\n%s', param, error)
+ LOG.warning('Failure enabling/disabling "%s":\n%s', param, error)
raise RuntimeError(
- "Failure enabling Ubuntu Advantage config(s): {}".format(
+ "Failure enabling/disabling Ubuntu Advantage config(s): {}".format(
", ".join('"{}"'.format(param) for param, _ in enable_errors)
)
)
- attach_cmd = ["ua", "attach", token]
- LOG.debug("Attaching to Ubuntu Advantage. %s", " ".join(attach_cmd))
+
+
+def configure_ua(token, enable=None):
+ """Call ua commandline client to attach and/or enable services."""
+ if enable is None:
+ enable = []
+ elif isinstance(enable, str):
+ LOG.warning(
+ "ubuntu_advantage: enable should be a list, not"
+ " a string; treating as a single enable"
+ )
+ enable = [enable]
+ elif not isinstance(enable, list):
+ LOG.warning(
+ "ubuntu_advantage: enable should be a list, not"
+ " a %s; skipping enabling services",
+ type(enable).__name__,
+ )
+ enable = []
+
+ # Perform attach
+ if enable:
+ attach_cmd = ["ua", "attach", "--no-auto-enable", token]
+ else:
+ attach_cmd = ["ua", "attach", token]
+ redacted_cmd = attach_cmd[:-1] + [REDACTED]
+ LOG.debug("Attaching to Ubuntu Advantage. %s", " ".join(redacted_cmd))
try:
- subp.subp(attach_cmd)
+ # Allow `ua attach` to fail in already attached machines
+ subp.subp(attach_cmd, rcs={0, 2}, logstring=redacted_cmd)
except subp.ProcessExecutionError as e:
- msg = "Failure attaching Ubuntu Advantage:\n{error}".format(
- error=str(e)
- )
+ err = str(e).replace(token, REDACTED)
+ msg = f"Failure attaching Ubuntu Advantage:\n{err}"
util.logexc(LOG, msg)
raise RuntimeError(msg) from e
- enable_errors = []
- for service in enable:
- try:
- cmd = ["ua", "enable", "--assume-yes", service]
- subp.subp(cmd, capture=True)
- except subp.ProcessExecutionError as e:
- enable_errors.append((service, e))
+
+ # Enable services
+ if not enable:
+ return
+ cmd = ["ua", "enable", "--assume-yes", "--format", "json", "--"] + enable
+ try:
+ enable_stdout, _ = subp.subp(cmd, capture=True, rcs={0, 1})
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Error while enabling service(s): " + ", ".join(enable)
+ ) from e
+
+ try:
+ enable_resp = json.loads(enable_stdout)
+ except json.JSONDecodeError as e:
+ raise RuntimeError(f"UA response was not json: {enable_stdout}") from e
+
+    # At this point we were able to load the JSON response from UA. This
+ # response contains a list of errors under the key 'errors'. E.g.
+ #
+ # {
+ # "errors": [
+ # {
+ # "message": "UA Apps: ESM is already enabled ...",
+ # "message_code": "service-already-enabled",
+ # "service": "esm-apps",
+ # "type": "service"
+ # },
+ # {
+ # "message": "Cannot enable unknown service 'asdf' ...",
+ # "message_code": "invalid-service-or-failure",
+ # "service": null,
+ # "type": "system"
+ # }
+ # ]
+ # }
+ #
+    # From our PoV there are two types of errors: service and non-service
+    # related. We can distinguish them by checking whether `service` is
+    # non-null or null, respectively.
+
+ # pylint: disable=import-error
+ from uaclient.messages import ALREADY_ENABLED
+
+ # pylint: enable=import-error
+
+ UA_MC_ALREADY_ENABLED = ALREADY_ENABLED.name
+
+ enable_errors: List[dict] = []
+ for err in enable_resp.get("errors", []):
+ if err["message_code"] == UA_MC_ALREADY_ENABLED:
+ LOG.debug("Service `%s` already enabled.", err["service"])
+ continue
+ enable_errors.append(err)
+
if enable_errors:
- for service, error in enable_errors:
- msg = 'Failure enabling "{service}":\n{error}'.format(
- service=service, error=str(error)
- )
+ error_services: List[str] = []
+ for err in enable_errors:
+ service = err.get("service")
+ if service is not None:
+ error_services.append(service)
+ msg = f'Failure enabling `{service}`: {err["message"]}'
+ else:
+ msg = f'Failure of type `{err["type"]}`: {err["message"]}'
util.logexc(LOG, msg)
+
raise RuntimeError(
- "Failure enabling Ubuntu Advantage service(s): {}".format(
- ", ".join(
- '"{}"'.format(service) for service, _ in enable_errors
- )
- )
+ "Failure enabling Ubuntu Advantage service(s): "
+ + ", ".join(error_services)
)
-def maybe_install_ua_tools(cloud):
+def maybe_install_ua_tools(cloud: Cloud):
"""Install ubuntu-advantage-tools if not present."""
if subp.which("ua"):
return
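configure_ua above now parses the JSON emitted by `ua enable --format json` and filters the reported errors as described in the inline comment. A self-contained sketch of that partitioning, using the sample response shape from the comment; the real module imports ALREADY_ENABLED from uaclient.messages, whereas the string constant here is an assumption for illustration:

    # "service-already-enabled" is treated as benign; everything else is a
    # real failure. Service-scoped errors carry a non-null "service" field.
    ALREADY_ENABLED_CODE = "service-already-enabled"  # assumed message code

    response = {
        "errors": [
            {
                "message": "UA Apps: ESM is already enabled ...",
                "message_code": "service-already-enabled",
                "service": "esm-apps",
                "type": "service",
            },
            {
                "message": "Cannot enable unknown service 'asdf' ...",
                "message_code": "invalid-service-or-failure",
                "service": None,
                "type": "system",
            },
        ]
    }

    real_errors = [
        err
        for err in response.get("errors", [])
        if err["message_code"] != ALREADY_ENABLED_CODE
    ]
    failed_services = [e["service"] for e in real_errors if e["service"]]
    print(failed_services)  # []: the only real error here is system-scoped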
@@ -247,7 +382,82 @@ def maybe_install_ua_tools(cloud):
raise
-def handle(name, cfg, cloud, log, args):
+def _should_auto_attach(ua_section: dict) -> bool:
+ disable_auto_attach = bool(
+ ua_section.get("features", {}).get("disable_auto_attach", False)
+ )
+ if disable_auto_attach:
+ return False
+
+ # pylint: disable=import-error
+ from uaclient.api.exceptions import UserFacingError
+ from uaclient.api.u.pro.attach.auto.should_auto_attach.v1 import (
+ should_auto_attach,
+ )
+
+ # pylint: enable=import-error
+
+ try:
+ result = should_auto_attach()
+ except UserFacingError as ex:
+ LOG.debug("Error during `should_auto_attach`: %s", ex)
+ LOG.warning(ERROR_MSG_SHOULD_AUTO_ATTACH)
+ return False
+ return result.should_auto_attach
+
+
+def _attach(ua_section: dict):
+ token = ua_section.get("token")
+ if not token:
+ msg = "`ubuntu-advantage.token` required in non-Pro Ubuntu instances."
+ LOG.error(msg)
+ raise RuntimeError(msg)
+ enable_beta = ua_section.get("enable_beta")
+ if enable_beta:
+ LOG.debug(
+ "Ignoring `ubuntu-advantage.enable_beta` services in UA attach:"
+ " %s",
+ ", ".join(enable_beta),
+ )
+ configure_ua(token=token, enable=ua_section.get("enable"))
+
+
+def _auto_attach(ua_section: dict):
+
+ # pylint: disable=import-error
+ from uaclient.api.exceptions import AlreadyAttachedError, UserFacingError
+ from uaclient.api.u.pro.attach.auto.full_auto_attach.v1 import (
+ FullAutoAttachOptions,
+ full_auto_attach,
+ )
+
+ # pylint: enable=import-error
+
+ enable = ua_section.get("enable")
+ enable_beta = ua_section.get("enable_beta")
+ options = FullAutoAttachOptions(
+ enable=enable,
+ enable_beta=enable_beta,
+ )
+ try:
+ full_auto_attach(options=options)
+ except AlreadyAttachedError:
+ if enable_beta is not None or enable is not None:
+ # Only warn if the user defined some service to enable/disable.
+ LOG.warning(
+ "The instance is already attached to Pro. Leaving enabled"
+ " services untouched. Ignoring config directives"
+ " ubuntu_advantage: enable and enable_beta"
+ )
+ except UserFacingError as ex:
+ msg = f"Error during `full_auto_attach`: {ex.msg}"
+ LOG.error(msg)
+ raise RuntimeError(msg) from ex
+
+
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
ua_section = None
if "ubuntu-advantage" in cfg:
LOG.warning(
@@ -265,6 +475,13 @@ def handle(name, cfg, cloud, log, args):
name,
)
return
+ elif not isinstance(ua_section, dict):
+ msg = (
+ f"'ubuntu_advantage' should be a dict, not a"
+ f" {type(ua_section).__name__}"
+ )
+ LOG.error(msg)
+ raise RuntimeError(msg)
if "commands" in ua_section:
msg = (
'Deprecated configuration "ubuntu-advantage: commands" provided.'
@@ -273,17 +490,26 @@ def handle(name, cfg, cloud, log, args):
LOG.error(msg)
raise RuntimeError(msg)
- config = ua_section.get("config")
-
- if config is not None:
- supplemental_schema_validation(config)
-
maybe_install_ua_tools(cloud)
- configure_ua(
- token=ua_section.get("token"),
- enable=ua_section.get("enable"),
- config=config,
- )
+ set_ua_config(ua_section.get("config"))
+
+    # ua-auto-attach.service will have noop-ed, as ua_section is not empty
+ validate_schema_features(ua_section)
+ if _should_auto_attach(ua_section):
+ _auto_attach(ua_section)
+
+    # If ua-auto-attach.service noop-ed, we did not auto-attach above, and
+    # keys other than `features` are given under `ubuntu_advantage`, then
+    # try to attach. This supports the following cases:
+    #
+    # 1) Previous attach behavior on non-Pro instances.
+    # 2) Previous attach behavior on instances where ubuntu-advantage-tools
+    #    is < v28.0 (the UA APIs for should_auto_attach and full_auto_attach
+    #    are not available).
+    # 3) The user wants to disable auto-attach and attach manually by giving:
+    #    `{"ubuntu_advantage": {"features": {"disable_auto_attach": True}}}`
+ elif not ua_section.keys() <= {"features"}:
+ _attach(ua_section)
# vi: ts=4 expandtab
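The fallback branch at the end of handle() hinges on `not ua_section.keys() <= {"features"}`: dict key views behave as sets, so this is a subset test that is false only when `features` is the sole key (or the section is empty). A tiny sketch of how that test drives the attach decision, with an illustrative helper name:

    def wants_manual_attach(ua_section: dict) -> bool:
        # Key views support set operators; `<=` is a subset test.
        return not ua_section.keys() <= {"features"}

    print(wants_manual_attach({"features": {"disable_auto_attach": True}}))  # False
    print(wants_manual_attach({}))                                           # False
    print(wants_manual_attach({"token": "x", "enable": ["fips"]}))           # True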
diff --git a/cloudinit/config/cc_ubuntu_autoinstall.py b/cloudinit/config/cc_ubuntu_autoinstall.py
index 3d79c9ea..3870cf59 100644
--- a/cloudinit/config/cc_ubuntu_autoinstall.py
+++ b/cloudinit/config/cc_ubuntu_autoinstall.py
@@ -3,9 +3,12 @@
"""Autoinstall: Support ubuntu live-server autoinstall syntax."""
import re
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import (
MetaSchema,
SchemaProblem,
@@ -72,7 +75,9 @@ __doc__ = get_meta_doc(meta)
LIVE_INSTALLER_SNAPS = ("subiquity", "ubuntu-desktop-installer")
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if "autoinstall" not in cfg:
LOG.debug(
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 09e7badd..59347e25 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -5,6 +5,9 @@
import os
from textwrap import dedent
+from cloudinit.cloud import Cloud
+from cloudinit.distros import Distro
+
try:
import debconf
@@ -13,8 +16,11 @@ except ImportError:
debconf = None
HAS_DEBCONF = False
+from logging import Logger
+
from cloudinit import log as logging
from cloudinit import subp, temp_utils, type_utils, util
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -70,7 +76,7 @@ Description: Late-link NVIDIA kernel modules?
X_LOADTEMPLATEFILE = "X_LOADTEMPLATEFILE"
-def install_drivers(cfg, pkg_install_func):
+def install_drivers(cfg, pkg_install_func, distro: Distro):
if not isinstance(cfg, dict):
raise TypeError(
"'drivers' config expected dict, found '%s': %s"
@@ -106,7 +112,7 @@ def install_drivers(cfg, pkg_install_func):
)
# Register and set debconf selection linux/nvidia/latelink = true
- tdir = temp_utils.mkdtemp(needs_exe=True)
+ tdir = temp_utils.mkdtemp(dir=distro.get_tmp_exec_path(), needs_exe=True)
debconf_file = os.path.join(tdir, "nvidia.template")
try:
util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
@@ -134,7 +140,9 @@ def install_drivers(cfg, pkg_install_func):
raise
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if "drivers" not in cfg:
log.debug("Skipping module named %s, no 'drivers' key in config", name)
return
@@ -145,4 +153,6 @@ def handle(name, cfg, cloud, log, _args):
)
return
- install_drivers(cfg["drivers"], cloud.distro.install_packages)
+ install_drivers(
+ cfg["drivers"], cloud.distro.install_packages, cloud.distro
+ )
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 56c52fe4..7bb9dff0 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -8,9 +8,12 @@
"""Update Etc Hosts: Update the hosts file (usually ``/etc/hosts``)"""
+from logging import Logger
from textwrap import dedent
from cloudinit import templater, util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
@@ -94,7 +97,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
hosts_fn = cloud.distro.hosts_fn
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 01d2078f..8a99297f 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -9,9 +9,12 @@
"""Update Hostname: Update hostname and fqdn"""
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
@@ -79,7 +82,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
log.debug(
"Configuration option 'preserve_hostname' is set,"
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index 612f172b..c654270e 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -6,14 +6,17 @@
"Users and Groups: Configure users and groups"
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
-from cloudinit.config.schema import MetaSchema, get_meta_doc
+from cloudinit.cloud import Cloud
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
+from cloudinit.config import Config
+from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ug_util
from cloudinit.settings import PER_INSTANCE
@@ -158,7 +161,9 @@ NO_HOME = ("no_create_home", "system")
NEED_HOME = ("ssh_authorized_keys", "ssh_import_id", "ssh_redirect_user")
-def handle(name, cfg, cloud, _log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
(users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(default_user, _user_config) = ug_util.extract_default(users)
cloud_keys = cloud.get_public_ssh_keys() or []
diff --git a/cloudinit/config/cc_wireguard.py b/cloudinit/config/cc_wireguard.py
index 366aff40..850c5a4f 100644
--- a/cloudinit/config/cc_wireguard.py
+++ b/cloudinit/config/cc_wireguard.py
@@ -4,11 +4,13 @@
"""Wireguard"""
import re
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import subp, util
from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -256,7 +258,9 @@ def load_wireguard_kernel_module():
raise
-def handle(name: str, cfg: dict, cloud: Cloud, log, args: list):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
wg_section = None
if "wireguard" in cfg:
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index a020fac4..f7d89935 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -8,11 +8,13 @@
import base64
import os
+from logging import Logger
from textwrap import dedent
from cloudinit import log as logging
from cloudinit import util
from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -114,7 +116,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud: Cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
file_list = cfg.get("write_files", [])
filtered_files = [
f
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
index 7cf5d593..a196ffb9 100644
--- a/cloudinit/config/cc_write_files_deferred.py
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -4,8 +4,11 @@
"""Write Files Deferred: Defer writing certain files"""
+from logging import Logger
+
from cloudinit import util
from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.cc_write_files import DEFAULT_DEFER, write_files
from cloudinit.config.schema import MetaSchema
from cloudinit.distros import ALL_DISTROS
@@ -35,7 +38,9 @@ meta: MetaSchema = {
__doc__ = ""
-def handle(name, cfg, cloud: Cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
file_list = cfg.get("write_files", [])
filtered_files = [
f
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 0e683de2..cf81b844 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -9,9 +9,12 @@
import io
import os
from configparser import ConfigParser
+from logging import Logger
from textwrap import dedent
from cloudinit import util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -28,6 +31,7 @@ distros = [
"cloudlinux",
"eurolinux",
"fedora",
+ "mariner",
"openEuler",
"openmandriva",
"photon",
@@ -163,7 +167,9 @@ def _format_repository_config(repo_id, repo_config):
return "".join(lines)
-def handle(name, cfg, _cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
repos = cfg.get("yum_repos")
if not repos:
log.debug(
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index 02867b8f..64e50577 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -6,12 +6,15 @@
"""zypper_add_repo: Add zypper repositories to the system"""
import os
+from logging import Logger
from textwrap import dedent
import configobj
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.cloud import Cloud
+from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
@@ -177,7 +180,9 @@ def _write_zypp_config(zypper_config):
util.write_file(zypp_config, new_config)
-def handle(name, cfg, _cloud, log, _args):
+def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+) -> None:
zypper_section = cfg.get("zypper")
if not zypper_section:
LOG.debug(
diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py
index 970343cd..6716fc32 100644
--- a/cloudinit/config/modules.py
+++ b/cloudinit/config/modules.py
@@ -8,7 +8,7 @@
import copy
from types import ModuleType
-from typing import Dict, List, NamedTuple
+from typing import Dict, List, NamedTuple, Optional
from cloudinit import config, importer
from cloudinit import log as logging
@@ -80,12 +80,12 @@ def _is_active(module_details: ModuleDetails, cfg: dict) -> bool:
return True
-class Modules(object):
+class Modules:
def __init__(self, init: Init, cfg_files=None, reporter=None):
self.init = init
self.cfg_files = cfg_files
# Created on first use
- self._cached_cfg = None
+ self._cached_cfg: Optional[config.Config] = None
if reporter is None:
reporter = ReportEventStack(
name="module-reporter",
@@ -95,7 +95,7 @@ class Modules(object):
self.reporter = reporter
@property
- def cfg(self):
+ def cfg(self) -> config.Config:
# None check to avoid empty case causing re-reading
if self._cached_cfg is None:
merger = ConfigMerger(
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
index b7124cb7..a91dc482 100644
--- a/cloudinit/config/schemas/schema-cloud-config-v1.json
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -1,13 +1,121 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"$defs": {
+ "all_modules": {
+ "enum": [
+ "ansible",
+ "apk-configure",
+ "apk_configure",
+ "apt-configure",
+ "apt_configure",
+ "apt-pipelining",
+ "apt_pipelining",
+ "bootcmd",
+ "byobu",
+ "ca-certs",
+ "ca_certs",
+ "chef",
+ "disable-ec2-metadata",
+ "disable_ec2_metadata",
+ "disk-setup",
+ "disk_setup",
+ "fan",
+ "final-message",
+ "final_message",
+ "growpart",
+ "grub-dpkg",
+ "grub_dpkg",
+ "install-hotplug",
+ "install_hotplug",
+ "keyboard",
+ "keys-to-console",
+ "keys_to_console",
+ "landscape",
+ "locale",
+ "lxd",
+ "mcollective",
+ "migrator",
+ "mounts",
+ "ntp",
+ "package-update-upgrade-install",
+ "package_update_upgrade_install",
+ "phone-home",
+ "phone_home",
+ "power-state-change",
+ "power_state_change",
+ "puppet",
+ "refresh-rmc-and-interface",
+ "refresh_rmc_and_interface",
+ "reset-rmc",
+ "reset_rmc",
+ "resizefs",
+ "resolv-conf",
+ "resolv_conf",
+ "rh-subscription",
+ "rh_subscription",
+ "rightscale-userdata",
+ "rightscale_userdata",
+ "rsyslog",
+ "runcmd",
+ "salt-minion",
+ "salt_minion",
+ "scripts-per-boot",
+ "scripts_per_boot",
+ "scripts-per-instance",
+ "scripts_per_instance",
+ "scripts-per-once",
+ "scripts_per_once",
+ "scripts-user",
+ "scripts_user",
+ "scripts-vendor",
+ "scripts_vendor",
+ "seed-random",
+ "seed_random",
+ "set-hostname",
+ "set_hostname",
+ "set-passwords",
+ "set_passwords",
+ "snap",
+ "spacewalk",
+ "ssh",
+ "ssh-authkey-fingerprints",
+ "ssh_authkey_fingerprints",
+ "ssh-import-id",
+ "ssh_import_id",
+ "timezone",
+ "ubuntu-advantage",
+ "ubuntu_advantage",
+ "ubuntu-autoinstall",
+ "ubuntu_autoinstall",
+ "ubuntu-drivers",
+ "ubuntu_drivers",
+ "update-etc-hosts",
+ "update_etc_hosts",
+ "update-hostname",
+ "update_hostname",
+ "users-groups",
+ "users_groups",
+ "wireguard",
+ "write-files",
+ "write_files",
+ "write-files-deferred",
+ "write_files_deferred",
+ "yum-add-repo",
+ "yum_add_repo",
+ "zypper-add-repo",
+ "zypper_add_repo"
+ ]
+ },
"users_groups.groups_by_groupname": {
"additionalProperties": false,
"patternProperties": {
"^.+$": {
"label": "<group_name>",
"description": "Optional string of single username or a list of usernames to add to the group",
- "type": ["string", "array"],
+ "type": [
+ "string",
+ "array"
+ ],
"items": {
"type": "string"
},
@@ -17,8 +125,16 @@
},
"users_groups.user": {
"oneOf": [
- {"required": ["name"]},
- {"required": ["snapuser"]}
+ {
+ "required": [
+ "name"
+ ]
+ },
+ {
+ "required": [
+ "snapuser"
+ ]
+ }
],
"additionalProperties": false,
"properties": {
@@ -39,11 +155,15 @@
"groups": {
"description": "Optional comma-separated string of groups to add the user to.",
"oneOf": [
- {"type": "string"},
+ {
+ "type": "string"
+ },
{
"type": "array",
"items": {
- "type": ["string"]
+ "type": [
+ "string"
+ ]
},
"minItems": 1
},
@@ -54,11 +174,15 @@
"label": "<group_name>",
"description": "When providing an object for users.groups the ``<group_name>`` keys are the groups to add this user to",
"deprecated": true,
- "type": ["null"],
+ "type": [
+ "null"
+ ],
"minItems": 1
}
},
- "hidden": ["patternProperties"]
+ "hidden": [
+ "patternProperties"
+ ]
}
]
},
@@ -134,13 +258,17 @@
"ssh_authorized_keys": {
"description": "List of SSH keys to add to user's authkeys file. Can not be combined with ``ssh_redirect_user``",
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"minItems": 1
},
"ssh_import_id": {
"description": "List of SSH IDs to import for user. Can not be combined with ``ssh_redirect_user``.",
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"minItems": 1
},
"ssh_redirect_user": {
@@ -156,7 +284,10 @@
"sudo": {
"oneOf": [
{
- "type": ["string", "null"],
+ "type": [
+ "string",
+ "null"
+ ],
"description": "Sudo rule to use or false. Absence of a sudo value or ``null`` will result in no sudo rules added for this user."
},
{
@@ -169,7 +300,9 @@
"uid": {
"description": "The user's ID. Default is next available value.",
"oneOf": [
- {"type": "integer"},
+ {
+ "type": "integer"
+ },
{
"type": "string",
"description": "The use of ``string`` type will be dropped after April 2027. Use an ``integer`` instead.",
@@ -183,26 +316,42 @@
"type": "array",
"items": {
"type": "object",
- "required": ["arches"],
+ "required": [
+ "arches"
+ ],
"additionalProperties": false,
"properties": {
"arches": {
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"minItems": 1
},
- "uri": {"type": "string", "format": "uri"},
+ "uri": {
+ "type": "string",
+ "format": "uri"
+ },
"search": {
"type": "array",
- "items": {"type": "string", "format": "uri"},
+ "items": {
+ "type": "string",
+ "format": "uri"
+ },
"minItems": 1
},
"search_dns": {
"type": "boolean"
},
- "keyid": {"type": "string"},
- "key": {"type": "string"},
- "keyserver": {"type": "string"}
+ "keyid": {
+ "type": "string"
+ },
+ "key": {
+ "type": "string"
+ },
+ "keyserver": {
+ "type": "string"
+ }
}
},
"minItems": 1
@@ -225,12 +374,55 @@
"trusted": {
"description": "List of trusted CA certificates to add.",
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"minItems": 1
}
},
"minProperties": 1
},
+ "modules_definition": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/$defs/all_modules"
+ },
+ {
+ "type": "array",
+ "prefixItems": [
+            {
+              "$ref": "#/$defs/all_modules"
+            },
+ {
+ "enum": [
+ "always",
+ "once",
+ "once-per-instance"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "base_config": {
+ "type": "object",
+ "properties": {
+ "cloud_init_modules": {
+ "$ref": "#/$defs/modules_definition"
+ },
+ "cloud_config_modules": {
+ "$ref": "#/$defs/modules_definition"
+ },
+ "cloud_final_modules": {
+ "$ref": "#/$defs/modules_definition"
+ }
+ }
+ },
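The new modules_definition and base_config $defs validate the module lists from /etc/cloud/cloud.cfg, where each entry may be a bare module name or a [name, frequency] pair. A hedged example of data that should satisfy the schema, written as the Python structures the YAML parses into:

    # Both accepted entry shapes; frequency values come from the enum above.
    base_config = {
        "cloud_config_modules": [
            "ssh_import_id",             # bare module name
            ["scripts_user", "always"],  # [module name, frequency] pair
            ["set_hostname", "once-per-instance"],
        ]
    }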
"cc_ubuntu_autoinstall": {
"type": "object",
"properties": {
@@ -242,7 +434,9 @@
"type": "integer"
}
},
- "required": ["version"]
+ "required": [
+ "version"
+ ]
}
},
"additionalProperties": true
@@ -254,7 +448,7 @@
"type": "object",
"additionalProperties": false,
"properties": {
- "install-method": {
+ "install_method": {
"type": "string",
"default": "distro",
"enum": [
@@ -263,16 +457,164 @@
],
"description": "The type of installation for ansible. It can be one of the following values:\n\n - ``distro``\n - ``pip``"
},
- "package-name": {
+ "run_user": {
+ "type": "string",
+ "description": "User to run module commands as. If install_method: pip, the pip install runs as this user as well."
+ },
+ "ansible_config": {
+ "description": "Sets the ANSIBLE_CONFIG environment variable. If set, overrides default config.",
+ "type": "string"
+ },
+ "setup_controller": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "repositories": {
+ "type": "array",
+ "items": {
+ "required": [
+ "path",
+ "source"
+ ],
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "path": {
+ "type": "string"
+ },
+ "source": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "run_ansible": {
+ "type": "array",
+ "items": {
+ "properties": {
+ "playbook_name": {
+ "type": "string"
+ },
+ "playbook_dir": {
+ "type": "string"
+ },
+ "become_password_file": {
+ "type": "string"
+ },
+ "connection_password_file": {
+ "type": "string"
+ },
+ "list_hosts": {
+ "type": "boolean",
+ "default": false
+ },
+ "syntax_check": {
+ "type": "boolean",
+ "default": false
+ },
+ "timeout": {
+ "type": "number",
+ "minimum": 0
+ },
+ "vault_id": {
+ "type": "string"
+ },
+ "vault_password_file": {
+ "type": "string"
+ },
+ "background": {
+ "type": "number",
+ "minimum": 0
+ },
+ "check": {
+ "type": "boolean",
+ "default": false
+ },
+ "diff": {
+ "type": "boolean",
+ "default": false
+ },
+ "module_path": {
+ "type": "string"
+ },
+ "poll": {
+ "type": "number",
+ "minimum": 0
+ },
+ "args": {
+ "type": "string"
+ },
+ "extra_vars": {
+ "type": "string"
+ },
+ "forks": {
+ "type": "number",
+ "minimum": 0
+ },
+ "inventory": {
+ "type": "string"
+ },
+ "scp_extra_args": {
+ "type": "string"
+ },
+ "sftp_extra_args": {
+ "type": "string"
+ },
+ "private_key": {
+ "type": "string"
+ },
+ "connection": {
+ "type": "string"
+ },
+ "module_name": {
+ "type": "string"
+ },
+ "sleep": {
+ "type": "string"
+ },
+ "tags": {
+ "type": "string"
+ },
+ "skip_tags": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ "galaxy": {
+ "required": [
+ "actions"
+ ],
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "actions": {
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "pattern": "^.*$"
+ }
+ }
+ }
+ }
+ },
+ "package_name": {
"type": "string",
"default": "ansible"
},
"pull": {
- "required": ["url", "playbook-name"],
+ "required": [
+ "url",
+ "playbook_name"
+ ],
"type": "object",
"additionalProperties": false,
"properties": {
- "accept-host-key": {
+ "accept_host_key": {
"type": "boolean",
"default": false
},
@@ -288,22 +630,22 @@
"type": "boolean",
"default": false
},
- "ssh-common-args": {
+ "ssh_common_args": {
"type": "string"
},
- "scp-extra-args": {
+ "scp_extra_args": {
"type": "string"
},
- "sftp-extra-args": {
+ "sftp_extra_args": {
"type": "string"
},
- "private-key": {
+ "private_key": {
"type": "string"
},
"checkout": {
"type": "string"
},
- "module-path": {
+ "module_path": {
"type": "string"
},
"timeout": {
@@ -315,13 +657,13 @@
"connection": {
"type": "string"
},
- "vault-id": {
+ "vault_id": {
"type": "string"
},
- "vault-password-file": {
+ "vault_password_file": {
"type": "string"
},
- "module-name": {
+ "module_name": {
"type": "string"
},
"sleep": {
@@ -330,10 +672,10 @@
"tags": {
"type": "string"
},
- "skip-tags": {
+ "skip_tags": {
"type": "string"
},
- "playbook-name": {
+ "playbook_name": {
"type": "string"
}
}
@@ -356,7 +698,10 @@
"description": "By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos section of cloud config. To disable this behavior and preserve the repositories file from the pristine image, set ``preserve_repositories`` to ``true``.\n\n The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``."
},
"alpine_repo": {
- "type": ["object", "null"],
+ "type": [
+ "object",
+ "null"
+ ],
"additionalProperties": false,
"properties": {
"base_url": {
@@ -379,12 +724,14 @@
"description": "The Alpine version to use (e.g. ``v3.12`` or ``edge``)"
}
},
- "required": ["version"],
+ "required": [
+ "version"
+ ],
"minProperties": 1
},
"local_repo_base_url": {
"type": "string",
- "description": "The base URL of an Alpine repository containing unofficial packages"
+ "description": "The base URL of an Alpine repository containing unofficial packages"
}
}
}
@@ -404,7 +751,9 @@
},
"disable_suites": {
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"minItems": 1,
"uniqueItems": true,
"description": "Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is present in a suite in the ``disable_suites`` list, it will be replaced with the release name. If a suite specified in ``disable_suites`` is not present in ``sources.list`` it will be ignored. For convenience, several aliases are provided for`` disable_suites``:\n\n - ``updates`` => ``$RELEASE-updates``\n - ``backports`` => ``$RELEASE-backports``\n - ``security`` => ``$RELEASE-security``\n - ``proposed`` => ``$RELEASE-proposed``\n - ``release`` => ``$RELEASE``.\n\nWhen a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it is just commented out."
@@ -412,7 +761,7 @@
"primary": {
"$ref": "#/$defs/apt_configure.mirror",
"description": "The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with different hosts used for a local APT mirror. If no mirror is provided by ``uri`` or ``search``, ``search_dns`` may be used to search for dns names in the format ``<distro>-mirror`` in each of the following:\n\n - fqdn of this host per cloud metadata,\n - localdomain,\n - domains listed in ``/etc/resolv.conf``.\n\nIf there is a dns entry for ``<distro>-mirror``, then it is assumed that there is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the ``primary`` key is defined, but not the ``security`` key, then then configuration for ``primary`` is also used for ``security``. If ``search_dns`` is used for the ``security`` key, the search pattern will be ``<distro>-security-mirror``.\n\nEach mirror may also specify a key to import via any of the following optional keys:\n\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n\nIf no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource either the following defaults are used:\n\n - ``primary`` => ``http://archive.ubuntu.com/ubuntu``.\n - ``security`` => ``http://security.ubuntu.com/ubuntu``"
- },
+ },
"security": {
"$ref": "#/$defs/apt_configure.mirror",
"description": "Please refer to the primary config documentation"
@@ -465,26 +814,30 @@
"type": "object",
"additionalProperties": false,
"properties": {
- "source": {
- "type": "string"
- },
- "keyid": {
- "type": "string"
- },
- "key": {
- "type": "string"
- },
- "keyserver": {
- "type": "string"
- },
- "filename": {
- "type": "string"
- }
+ "source": {
+ "type": "string"
+ },
+ "keyid": {
+ "type": "string"
+ },
+ "key": {
+ "type": "string"
+ },
+ "keyserver": {
+ "type": "string"
+ },
+ "filename": {
+ "type": "string"
+ },
+ "append": {
+ "type": "boolean",
+ "default": true
+ }
},
"minProperties": 1
}
},
- "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``"
+        "description": "Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source file. The key of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, ``.list`` will be appended to it. If there is no configuration for a key in ``sources``, no file will be written, but the key may still be referred to as an id in other ``sources`` entries.\n\nEach entry under ``sources`` is a dictionary which may contain any of the following optional keys:\n - ``source``: a sources.list entry (some variable replacements apply).\n - ``keyid``: a key to import via shortid or fingerprint.\n - ``key``: a raw PGP key.\n - ``keyserver``: alternate keyserver to pull ``keyid`` key from.\n - ``filename``: specify the name of the list file.\n - ``append``: if ``true``, append to the sources file; otherwise overwrite it. Default: ``true``.\n\nThe ``source`` key supports variable replacements for the following strings:\n\n - ``$MIRROR``\n - ``$PRIMARY``\n - ``$SECURITY``\n - ``$RELEASE``\n - ``$KEY_FILE``"
}
}
}
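For reference, a ``sources`` entry exercising the new ``append`` key could look like the following Python dict (all names and values here are illustrative only):

    sources = {
        "my-repo": {  # written to /etc/apt/sources.list.d/my-repo.list
            "source": "deb $MIRROR $RELEASE multiverse",
            "keyid": "0123ABCD",  # example short key id to import
            "append": False,      # overwrite the file instead of appending
        }
    }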
@@ -495,9 +848,20 @@
"properties": {
"apt_pipelining": {
"oneOf": [
- {"type": "integer"},
- {"type": "boolean"},
- {"type": "string", "enum": ["none", "unchanged", "os"]}
+ {
+ "type": "integer"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "string",
+ "enum": [
+ "none",
+ "unchanged",
+ "os"
+ ]
+ }
]
}
}
@@ -509,8 +873,15 @@
"type": "array",
"items": {
"oneOf": [
- {"type": "array", "items": {"type": "string"}},
- {"type": "string"}
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "string"
+ }
]
},
"additionalItems": false,
@@ -540,16 +911,18 @@
"type": "object",
"properties": {
"ca_certs": {
- "$ref": "#/$defs/ca_certs.properties"
+ "$ref": "#/$defs/ca_certs.properties"
},
"ca-certs": {
- "allOf": [
- {"$ref": "#/$defs/ca_certs.properties"},
- {
- "deprecated": true,
- "description": "Dropped after April 2027. Use ``ca_certs``."
- }
- ]
+ "allOf": [
+ {
+ "$ref": "#/$defs/ca_certs.properties"
+ },
+ {
+ "deprecated": true,
+ "description": "Dropped after April 2027. Use ``ca_certs``."
+ }
+ ]
}
}
},
@@ -563,7 +936,9 @@
"properties": {
"directories": {
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"minItems": 1,
"uniqueItems": true,
"description": "Create the necessary directories for chef to run. By default, it creates the following directories:\n\n - ``/etc/chef``\n - ``/var/log/chef``\n - ``/var/lib/chef``\n - ``/var/cache/chef``\n - ``/var/backups/chef``\n - ``/var/run/chef``"
@@ -675,7 +1050,9 @@
},
"initial_attributes": {
"type": "object",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"description": "Specify a list of initial attributes used by the cookbooks."
},
"install_type": {
@@ -690,7 +1067,9 @@
},
"run_list": {
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"description": "A run list for a first boot json."
},
"chef_license": {
@@ -737,22 +1116,36 @@
"table_type": {
"type": "string",
"default": "mbr",
- "enum": ["mbr", "gpt"],
+ "enum": [
+ "mbr",
+ "gpt"
+ ],
"description": "Specifies the partition table type, either ``mbr`` or ``gpt``. Default: ``mbr``."
},
"layout": {
"default": false,
"oneOf": [
- {"type": "string", "enum": ["remove"]},
- {"type": "boolean"},
+ {
+ "type": "string",
+ "enum": [
+ "remove"
+ ]
+ },
+ {
+ "type": "boolean"
+ },
{
"type": "array",
"items": {
"oneOf": [
- {"type": "integer"},
+ {
+ "type": "integer"
+ },
{
"type": "array",
- "items": {"type": "integer"},
+ "items": {
+ "type": "integer"
+ },
"minItems": 2,
"maxItems": 2
}
@@ -760,7 +1153,7 @@
}
}
],
- "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). Default: ``false``."
+          "description": "If set to ``true``, a single partition using all the space on the device will be created. If set to ``false``, no partitions will be created. If set to ``remove``, any existing partition table will be purged. Partitions can be specified by providing a list to ``layout``, where each entry in the list is either a size or a list containing a size and the numerical value for a partition type. The size for partitions is specified in **percentage** of disk space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space). The partition type defaults to '83' (Linux partition); for other partition types, such as Linux swap, the type must be passed as part of a list along with the size. Default: ``false``."
},
"overwrite": {
"type": "boolean",
@@ -790,11 +1183,18 @@
"description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<device name>.<partition number>`` format, the value of ``partition`` will be overwritten."
},
"partition": {
- "type": ["string", "integer"],
+ "type": [
+ "string",
+ "integer"
+ ],
"oneOf": [
{
"type": "string",
- "enum": ["auto", "any", "none"]
+ "enum": [
+ "auto",
+ "any",
+ "none"
+ ]
}
],
"description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
@@ -808,13 +1208,23 @@
"description": "Ignored unless ``partition`` is ``auto`` or ``any``. Default ``false``."
},
"extra_opts": {
- "type": ["array", "string"],
- "items": {"type": "string"},
+ "type": [
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ },
"description": "Optional options to pass to the filesystem creation command. Ignored if you using ``cmd`` directly."
},
"cmd": {
- "type": ["array", "string"],
- "items": {"type": "string"},
+ "type": [
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": "string"
+ },
"description": "Optional command to run to create the filesystem. Can include string substitutions of the other ``fs_setup`` config keys. This is only necessary if you need to override the default command."
}
}
@@ -827,7 +1237,9 @@
"properties": {
"fan": {
"type": "object",
- "required": ["config"],
+ "required": [
+ "config"
+ ],
"additionalProperties": false,
"properties": {
"config": {
@@ -864,10 +1276,17 @@
"description": "The utility to use for resizing. Default: ``auto``\n\nPossible options:\n\n* ``auto`` - Use any available utility\n\n* ``growpart`` - Use growpart utility\n\n* ``gpart`` - Use BSD gpart utility\n\n* ``off`` - Take no action.",
"oneOf": [
{
- "enum": ["auto", "growpart", "gpart", "off"]
+ "enum": [
+ "auto",
+ "growpart",
+ "gpart",
+ "off"
+ ]
},
{
- "enum": [false],
+ "enum": [
+ false
+ ],
"description": "Specifying a boolean ``false`` value for this key is deprecated. Use ``off`` instead.",
"deprecated": true
}
@@ -875,7 +1294,9 @@
},
"devices": {
"type": "array",
- "default": ["/"],
+ "default": [
+ "/"
+ ],
"items": {
"type": "string"
},
@@ -937,7 +1358,9 @@
"properties": {
"network": {
"type": "object",
- "required": ["when"],
+ "required": [
+ "when"
+ ],
"additionalProperties": false,
"properties": {
"when": {
@@ -983,7 +1406,9 @@
"description": "Optional. Keyboard options. Corresponds to XKBOPTIONS."
}
},
- "required": ["layout"]
+ "required": [
+ "layout"
+ ]
}
}
},
@@ -999,20 +1424,28 @@
"default": true,
"description": "Set false to avoid printing SSH keys to system console. Default: ``true``."
}
- },
- "required": ["emit_keys_to_console"]
+ },
+ "required": [
+ "emit_keys_to_console"
+ ]
},
"ssh_key_console_blacklist": {
"type": "array",
- "default": ["ssh-dss"],
+ "default": [
+ "ssh-dss"
+ ],
"description": "Avoid printing matching SSH key types to the system console.",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"uniqueItems": true
},
"ssh_fp_console_blacklist": {
"type": "array",
"description": "Avoid printing matching SSH fingerprints to the system console.",
- "items": {"type": "string"},
+ "items": {
+ "type": "string"
+ },
"uniqueItems": true
}
}
@@ -1022,7 +1455,9 @@
"properties": {
"landscape": {
"type": "object",
- "required": ["client"],
+ "required": [
+ "client"
+ ],
"additionalProperties": false,
"properties": {
"client": {
@@ -1042,12 +1477,18 @@
"data_path": {
"type": "string",
"default": "/var/lib/landscape/client",
- "description": "The directory to store data files in. Default: ``/var/lib/land‐scape/client/``."
+          "description": "The directory to store data files in. Default: ``/var/lib/landscape/client/``."
},
"log_level": {
"type": "string",
"default": "info",
- "enum": ["debug", "info", "warning", "error", "critical"],
+ "enum": [
+ "debug",
+ "info",
+ "warning",
+ "error",
+ "critical"
+ ],
"description": "The log level for the client. Default: ``info``."
},
"computer_title": {
@@ -1104,6 +1545,7 @@
"init": {
"type": "object",
"additionalProperties": false,
+          "description": "LXD init configuration values to provide to the ``lxd init --auto`` command. Cannot be combined with ``lxd.preseed``.",
"properties": {
"network_address": {
"type": "string",
@@ -1115,7 +1557,12 @@
},
"storage_backend": {
"type": "string",
- "enum": ["zfs", "dir", "lvm", "btrfs"],
+ "enum": [
+ "zfs",
+ "dir",
+ "lvm",
+ "btrfs"
+ ],
"default": "dir",
"description": "Storage backend to use. Default: ``dir``."
},
@@ -1139,13 +1586,20 @@
},
"bridge": {
"type": "object",
- "required": ["mode"],
+ "required": [
+ "mode"
+ ],
"additionalProperties": false,
+          "description": "LXD bridge configuration used to set up the host LXD bridge. Cannot be combined with ``lxd.preseed``.",
"properties": {
"mode": {
"type": "string",
"description": "Whether to setup LXD bridge, use an existing bridge by ``name`` or create a new bridge. `none` will avoid bridge setup, `existing` will configure lxd to use the bring matching ``name`` and `new` will create a new bridge.",
- "enum": ["none", "existing", "new"]
+ "enum": [
+ "none",
+ "existing",
+ "new"
+ ]
},
"name": {
"type": "string",
@@ -1201,6 +1655,10 @@
"description": "Domain to advertise to DHCP clients and use for DNS resolution."
}
}
+ },
+ "preseed": {
+ "type": "string",
+          "description": "Opaque LXD preseed YAML config passed via stdin to the command ``lxd init --preseed``. See https://linuxcontainers.org/lxd/docs/master/preseed/ or ``lxd init --dump`` for viable config. Cannot be combined with either ``lxd.init`` or ``lxd.bridge``."
}
}
}
@@ -1230,9 +1688,15 @@
"^.+$": {
"description": "Optional config key: value pairs which will be appended to ``/etc/mcollective/server.cfg``.",
"oneOf": [
- {"type": "boolean"},
- {"type": "integer"},
- {"type": "string"}
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "integer"
+ },
+ {
+ "type": "string"
+ }
]
}
}
@@ -1257,10 +1721,12 @@
"mounts": {
"type": "array",
"items": {
- "type": "array",
- "items": {"type": "string"},
- "minItems": 1,
- "maxItems": 6
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "minItems": 1,
+ "maxItems": 6
},
"description": "List of lists. Each inner list entry is a list of ``/etc/fstab`` mount declarations of the format: [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ]. A mount declaration with less than 6 items will get remaining values from ``mount_default_fields``. A mount declaration with only `fs_spec` and no `fs_file` mountpoint will be skipped.",
"minItems": 1
@@ -1268,11 +1734,22 @@
"mount_default_fields": {
"type": "array",
"description": "Default mount configuration for any mount entry with less than 6 options provided. When specified, 6 items are required and represent ``/etc/fstab`` entries. Default: ``defaults,nofail,x-systemd.requires=cloud-init.service,_netdev``",
- "default": [null, null, "auto", "defaults,nofail,x-systemd.requires=cloud-init.service", "0", "2"],
+ "default": [
+ null,
+ null,
+ "auto",
+ "defaults,nofail,x-systemd.requires=cloud-init.service",
+ "0",
+ "2"
+ ],
"items": {
"oneOf": [
- {"type": "string"},
- {"type": "null"}
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
]
},
"minItems": 6,
@@ -1289,15 +1766,29 @@
"size": {
"description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format <float_size><units> where units are one of B, K, M, G or T.",
"oneOf": [
- {"enum": ["auto"]},
- {"type": "integer"},
- {"type": "string", "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$"}
+ {
+ "enum": [
+ "auto"
+ ]
+ },
+ {
+ "type": "integer"
+ },
+ {
+ "type": "string",
+ "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$"
+ }
]
},
"maxsize": {
"oneOf": [
- {"type": "integer"},
- {"type": "string", "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$"}
+ {
+ "type": "integer"
+ },
+ {
+ "type": "string",
+ "pattern": "^([0-9]+)?\\.?[0-9]+[BKMGT]$"
+ }
],
"description": "The maxsize in bytes of the swap file"
}
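The ``size``/``maxsize`` pattern above accepts strings such as ``2G`` or ``0.5M``. A sketch of turning such a value into bytes; the helper name and binary multipliers are this example's own assumptions:

    import re

    SIZE_RE = re.compile(r"^([0-9]+)?\.?[0-9]+[BKMGT]$")
    MULTIPLIER = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40}

    def swap_size_bytes(value):
        if isinstance(value, int):
            return value  # integers are already bytes
        if not SIZE_RE.match(value):
            raise ValueError("invalid size: %s" % value)
        return int(float(value[:-1]) * MULTIPLIER[value[-1]])

    print(swap_size_bytes("2G"))    # 2147483648
    print(swap_size_bytes("0.5M"))  # 524288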
@@ -1309,7 +1800,10 @@
"type": "object",
"properties": {
"ntp": {
- "type": ["null", "object"],
+ "type": [
+ "null",
+ "object"
+ ],
"additionalProperties": false,
"properties": {
"pools": {
@@ -1333,7 +1827,7 @@
"ntp_client": {
"type": "string",
"default": "auto",
- "description": "Name of an NTP client to use to configure system NTP.\nWhen unprovided or 'auto' the default client preferred\nby the distribution will be used. The following\nbuilt-in client names can be used to override existing\nconfiguration defaults: chrony, ntp, ntpdate,\nsystemd-timesyncd."
+ "description": "Name of an NTP client to use to configure system NTP.\nWhen unprovided or 'auto' the default client preferred\nby the distribution will be used. The following\nbuilt-in client names can be used to override existing\nconfiguration defaults: chrony, ntp, openntpd,\nntpdate, systemd-timesyncd."
},
"enabled": {
"type": "boolean",
@@ -1377,60 +1871,71 @@
}
},
"cc_package_update_upgrade_install": {
- "type": "object",
- "properties": {
- "packages": {
- "type": "array",
- "description": "A list of packages to install. Each entry in the list can be either a package name or a list with two entries, the first being the package name and the second being the specific package version to install.",
- "items": {
- "oneOf": [
- {"type": "array", "items": {"type": "string"}, "minItems": 2, "maxItems": 2},
- {"type": "string"}
- ]
+ "type": "object",
+ "properties": {
+ "packages": {
+ "type": "array",
+ "description": "A list of packages to install. Each entry in the list can be either a package name or a list with two entries, the first being the package name and the second being the specific package version to install.",
+ "items": {
+ "oneOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "minItems": 2,
+ "maxItems": 2
},
- "minItems": 1
- },
- "package_update": {
- "type": "boolean",
- "default": false,
- "description": "Set ``true`` to update packages. Happens before upgrade or install. Default: ``false``"
- },
- "package_upgrade": {
- "type": "boolean",
- "default": false,
- "description": "Set ``true`` to upgrade packages. Happens before install. Default: ``false``"
- },
- "package_reboot_if_required": {
- "type": "boolean",
- "default": false,
- "description": "Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. Default: ``false``"
- },
- "apt_update": {
- "type": "boolean",
- "default": false,
- "description": "Dropped after April 2027. Use ``package_update``. Default: ``false``",
- "deprecated": true
- },
- "apt_upgrade": {
- "type": "boolean",
- "default": false,
- "description": "Dropped after April 2027. Use ``package_upgrade``. Default: ``false``",
- "deprecated": true
- },
- "apt_reboot_if_required": {
- "type": "boolean",
- "default": false,
- "description": "Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``",
- "deprecated": true
- }
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "minItems": 1
+ },
+ "package_update": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to update packages. Happens before upgrade or install. Default: ``false``"
+ },
+ "package_upgrade": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to upgrade packages. Happens before install. Default: ``false``"
+ },
+ "package_reboot_if_required": {
+ "type": "boolean",
+ "default": false,
+ "description": "Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. Default: ``false``"
+ },
+ "apt_update": {
+ "type": "boolean",
+ "default": false,
+ "description": "Dropped after April 2027. Use ``package_update``. Default: ``false``",
+ "deprecated": true
+ },
+ "apt_upgrade": {
+ "type": "boolean",
+ "default": false,
+ "description": "Dropped after April 2027. Use ``package_upgrade``. Default: ``false``",
+ "deprecated": true
+ },
+ "apt_reboot_if_required": {
+ "type": "boolean",
+ "default": false,
+ "description": "Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``",
+ "deprecated": true
}
+ }
},
"cc_phone_home": {
"type": "object",
"properties": {
"phone_home": {
"type": "object",
- "required": ["url"],
+ "required": [
+ "url"
+ ],
"additionalProperties": false,
"properties": {
"url": {
@@ -1441,7 +1946,11 @@
"post": {
"description": "A list of keys to post or ``all``. Default: ``all``",
"oneOf": [
- {"enum": ["all"]},
+ {
+ "enum": [
+ "all"
+ ]
+ },
{
"type": "array",
"items": {
@@ -1473,27 +1982,40 @@
"properties": {
"power_state": {
"type": "object",
- "required": ["mode"],
+ "required": [
+ "mode"
+ ],
"additionalProperties": false,
"properties": {
"delay": {
"description": "Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer specifying the number of minutes to delay. Default: ``now``",
"default": "now",
"oneOf": [
- {"type": "integer", "minimum": 0},
{
- "type": "string",
- "pattern": "^\\+?[0-9]+$",
- "deprecated": true,
- "description": "Use of string for this value will be dropped after April 2027. Use ``now`` or integer type."
+ "type": "integer",
+ "minimum": 0
+ },
+ {
+ "type": "string",
+ "pattern": "^\\+?[0-9]+$",
+ "deprecated": true,
+ "description": "Use of string for this value will be dropped after April 2027. Use ``now`` or integer type."
},
- {"enum": ["now"]}
+ {
+ "enum": [
+ "now"
+ ]
+ }
]
},
"mode": {
"description": "Must be one of ``poweroff``, ``halt``, or ``reboot``.",
"type": "string",
- "enum": ["poweroff", "reboot", "halt"]
+ "enum": [
+ "poweroff",
+ "reboot",
+ "halt"
+ ]
},
"message": {
"description": "Optional message to display to the user when the system is powering off or rebooting.",
@@ -1508,9 +2030,15 @@
"description": "Apply state change only if condition is met. May be boolean true (always met), false (never met), or a command string or list to be executed. For command formatting, see the documentation for ``cc_runcmd``. If exit code is 0, condition is met, otherwise not. Default: ``true``",
"default": true,
"oneOf": [
- {"type": "string"},
- {"type": "boolean"},
- {"type": "array"}
+ {
+ "type": "string"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "array"
+ }
]
}
}
@@ -1536,7 +2064,10 @@
"install_type": {
"type": "string",
"description": "Valid values are ``packages`` and ``aio``. Agent packages from the puppetlabs repositories can be installed by setting ``aio``. Based on this setting, the default config/SSL/CSR paths will be adjusted accordingly. Default: ``packages``",
- "enum": ["packages", "aio"],
+ "enum": [
+ "packages",
+ "aio"
+ ],
"default": "packages"
},
"collection": {
@@ -1628,7 +2159,11 @@
"type": "object",
"properties": {
"resize_rootfs": {
- "enum": [true, false, "noblock"],
+ "enum": [
+ true,
+ false,
+ "noblock"
+ ],
"description": "Whether to resize the root partition. ``noblock`` will resize in the background. Default: ``true``"
}
}
@@ -1753,10 +2288,14 @@
"description": "Each entry in ``configs`` is either a string or an object. Each config entry contains a configuration string and a file to write it to. For config entries that are an object, ``filename`` sets the target filename and ``content`` specifies the config string to write. For config entries that are only a string, the string is used as the config string to write. If the filename to write the config to is not specified, the value of the ``config_filename`` key is used. A file with the selected filename will be written inside the directory specified by ``config_dir``.",
"items": {
"oneOf": [
- {"type": "string"},
+ {
+ "type": "string"
+ },
{
"type": "object",
- "required": ["content"],
+ "required": [
+ "content"
+ ],
"additionalProperties": false,
"properties": {
"filename": {
@@ -1777,8 +2316,17 @@
"service_reload_command": {
"description": "The command to use to reload the rsyslog service after the config has been updated. If this is set to ``auto``, then an appropriate command for the distro will be used. This is the default behavior. To manually set the command, use a list of command args (e.g. ``[systemctl, restart, rsyslog]``).",
"oneOf": [
- {"enum": ["auto"]},
- {"type": "array", "items": {"type": "string"}}
+ {
+ "enum": [
+ "auto"
+ ]
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
]
}
}
@@ -1792,9 +2340,18 @@
"type": "array",
"items": {
"oneOf": [
- {"type": "array", "items": {"type": "string"}},
- {"type": "string"},
- {"type": "null"}
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
]
},
"minItems": 1
@@ -1866,8 +2423,16 @@
]
},
"prefix": {
- "type": ["array", "string"],
- "items": {"type": ["string", "integer"]},
+ "type": [
+ "array",
+ "string"
+ ],
+ "items": {
+ "type": [
+ "string",
+ "integer"
+ ]
+ },
"description": "The command to run before any vendor scripts. Its primary use case is for profiling a script, not to prevent its run"
}
}
@@ -1893,13 +2458,21 @@
"encoding": {
"type": "string",
"default": "raw",
- "enum": ["raw", "base64", "b64", "gzip", "gz"],
+ "enum": [
+ "raw",
+ "base64",
+ "b64",
+ "gzip",
+ "gz"
+ ],
"description": "Used to decode ``data`` provided. Allowed values are ``raw``, ``base64``, ``b64``, ``gzip``, or ``gz``. Default: ``raw``"
},
"command": {
"type": "array",
- "items": {"type": "string"},
- "description": "Execute this command to seed random. The command will have RANDOM_SEED_FILE in its environment set to the value of ``file`` above."
+ "items": {
+ "type": "string"
+ },
+ "description": "Execute this command to seed random. The command will have RANDOM_SEED_FILE in its environment set to the value of ``file`` above."
},
"command_required": {
"type": "boolean",
@@ -1937,7 +2510,9 @@
"properties": {
"ssh_pwauth": {
"oneOf": [
- {"type": "boolean"},
+ {
+ "type": "boolean"
+ },
{
"type": "string",
"description": "Use of non-boolean values for this field is DEPRECATED and will result in an error in a future version of cloud-init.",
@@ -1963,27 +2538,38 @@
"type": "object",
"anyOf": [
{
- "required": ["name", "type"],
+ "required": [
+ "name",
+ "type"
+ ],
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"type": {
- "enum": ["RANDOM"],
+ "enum": [
+ "RANDOM"
+ ],
"type": "string"
}
}
},
{
- "required": ["name", "password"],
+ "required": [
+ "name",
+ "password"
+ ],
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"type": {
- "enum": ["hash", "text"],
+ "enum": [
+ "hash",
+ "text"
+ ],
"default": "hash",
"type": "string"
},
@@ -1997,13 +2583,16 @@
},
"list": {
"oneOf": [
- {"type": "string"},
+ {
+ "type": "string"
+ },
{
"type": "array",
"items": {
"type": "string",
"pattern": "^.+:.+$"
- }}
+ }
+ }
],
"minItems": 1,
"description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.\n\nUse of a multiline string for this field is DEPRECATED and will result in an error in a future version of cloud-init.",
@@ -2027,21 +2616,38 @@
"properties": {
"assertions": {
"description": "Properly-signed snap assertions which will run before and snap ``commands``.",
- "type": ["object", "array"],
- "items": {"type": "string"},
+ "type": [
+ "object",
+ "array"
+ ],
+ "items": {
+ "type": "string"
+ },
"additionalItems": false,
"minItems": 1,
"minProperties": 1,
"uniqueItems": true,
- "additionalProperties": {"type": "string"}
+ "additionalProperties": {
+ "type": "string"
+ }
},
"commands": {
- "type": ["object", "array"],
+ "type": [
+ "object",
+ "array"
+ ],
"description": "Snap commands to run on the target system",
"items": {
"oneOf": [
- {"type": "string"},
- {"type": "array", "items": {"type": "string"}}
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
]
},
"additionalItems": false,
@@ -2049,8 +2655,15 @@
"minProperties": 1,
"additionalProperties": {
"oneOf": [
- {"type": "string"},
- {"type": "array", "items": {"type": "string"}}
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
]
}
}
@@ -2130,7 +2743,7 @@
"type": "string"
}
},
- "ssh_deletekeys" : {
+ "ssh_deletekeys": {
"type": "boolean",
"default": true,
"description": "Remove host SSH keys. This prevents re-use of a private host key from an image with default host SSH keys. Default: ``true``"
@@ -2138,11 +2751,21 @@
"ssh_genkeytypes": {
"type": "array",
"description": "The SSH key types to generate. Default: ``[rsa, dsa, ecdsa, ed25519]``",
- "default": ["dsa", "ecdsa", "ed25519", "rsa"],
+ "default": [
+ "dsa",
+ "ecdsa",
+ "ed25519",
+ "rsa"
+ ],
"minItems": 1,
"items": {
"type": "string",
- "enum": ["dsa", "ecdsa", "ed25519", "rsa"]
+ "enum": [
+ "dsa",
+ "ecdsa",
+ "ed25519",
+ "rsa"
+ ]
}
},
"disable_root": {
@@ -2199,51 +2822,90 @@
"properties": {
"ubuntu_advantage": {
"type": "object",
- "required": ["token"],
"additionalProperties": false,
"properties": {
"enable": {
"type": "array",
- "items": {"type": "string"},
- "description": "Optional list of ubuntu-advantage services to enable. Any of: cc-eal, cis, esm-infra, fips, fips-updates, livepatch. By default, a given contract token will automatically enable a number of services, use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled."
+ "items": {
+ "type": "string"
+ },
+              "description": "Optional list of ubuntu-advantage services to enable. Any of: cc-eal, cis, esm-infra, fips, fips-updates, livepatch. By default, a given contract token will automatically enable a number of services; use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled. In Ubuntu Pro instances, if this list is given, then only those services will be enabled, ignoring contract defaults. Passing beta services here will cause an error."
+ },
+ "enable_beta": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+              "description": "Optional list of ubuntu-advantage beta services to enable. By default, a given contract token will automatically enable a number of services; use this list to supplement which services should additionally be enabled. Any service unavailable on a given Ubuntu release or unentitled in a given contract will remain disabled. In Ubuntu Pro instances, if this list is given, then only those services will be enabled, ignoring contract defaults."
},
"token": {
"type": "string",
- "description": "Required contract token obtained from https://ubuntu.com/advantage to attach."
+ "description": "Contract token obtained from https://ubuntu.com/advantage to attach. Required for non-Pro instances."
+ },
+ "features": {
+ "type": "object",
+ "description": "Ubuntu Advantage features.",
+ "additionalProperties": false,
+ "properties": {
+ "disable_auto_attach": {
+ "type": "boolean",
+                  "description": "Optional boolean for controlling whether ua-auto-attach.service (in Ubuntu Pro instances) will be attempted each boot. Default: ``false``",
+ "default": false
+ }
+ }
},
"config": {
"type": "object",
- "description": "Configuration settings or override Ubuntu Advantage config",
+ "description": "Configuration settings or override Ubuntu Advantage config.",
+ "additionalProperties": true,
"properties": {
"http_proxy": {
- "type": "string",
+ "type": [
+ "string",
+ "null"
+ ],
"format": "uri",
- "description": "Ubuntu Advantage HTTP Proxy URL"
+ "description": "Ubuntu Advantage HTTP Proxy URL or null to unset."
},
"https_proxy": {
- "type": "string",
+ "type": [
+ "string",
+ "null"
+ ],
"format": "uri",
- "description": "Ubuntu Advantage HTTPS Proxy URL"
+ "description": "Ubuntu Advantage HTTPS Proxy URL or null to unset."
},
"global_apt_http_proxy": {
- "type": "string",
+ "type": [
+ "string",
+ "null"
+ ],
"format": "uri",
- "description": "HTTP Proxy URL used for all APT repositories on a system. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
+ "description": "HTTP Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
},
"global_apt_https_proxy": {
- "type": "string",
+ "type": [
+ "string",
+ "null"
+ ],
"format": "uri",
- "description": "HTTPS Proxy URL used for all APT repositories on a system. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
+ "description": "HTTPS Proxy URL used for all APT repositories on a system or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
},
"ua_apt_http_proxy": {
- "type": "string",
+ "type": [
+ "string",
+ "null"
+ ],
"format": "uri",
- "description": "HTTP Proxy URL used only for Ubuntu Advantage APT repositories. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
+ "description": "HTTP Proxy URL used only for Ubuntu Advantage APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
},
"ua_apt_https_proxy": {
- "type": "string",
+ "type": [
+ "string",
+ "null"
+ ],
"format": "uri",
- "description": "HTTPS Proxy URL used only for Ubuntu Advantage APT repositories. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
+ "description": "HTTPS Proxy URL used only for Ubuntu Advantage APT repositories or null to unset. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
}
}
}
@@ -2286,9 +2948,17 @@
"default": false,
"description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``.",
"oneOf": [
- {"enum": [true, false, "localhost"]},
{
- "enum": ["template"],
+ "enum": [
+ true,
+ false,
+ "localhost"
+ ]
+ },
+ {
+ "enum": [
+ "template"
+ ],
"description": "Value ``template`` will be dropped after April 2027. Use ``true`` instead.",
"deprecated": true
}
@@ -2323,29 +2993,57 @@
"type": "object",
"properties": {
"groups": {
- "type": ["string", "object", "array"],
- "hidden": ["patternProperties"],
+ "type": [
+ "string",
+ "object",
+ "array"
+ ],
+ "hidden": [
+ "patternProperties"
+ ],
"$ref": "#/$defs/users_groups.groups_by_groupname",
"items": {
- "type": ["string", "object"],
+ "type": [
+ "string",
+ "object"
+ ],
"$ref": "#/$defs/users_groups.groups_by_groupname"
},
"minItems": 1
},
"user": {
"oneOf": [
- {"type": "string"},
- {"type": "object", "$ref": "#/$defs/users_groups.user"}
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+ "$ref": "#/$defs/users_groups.user"
+ }
],
"description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. The `user` dictionary keys supported for the default_user are the same as the ``users`` schema."
},
"users": {
- "type": ["string", "array", "object"],
+ "type": [
+ "string",
+ "array",
+ "object"
+ ],
"items": {
"oneOf": [
- {"type": "string"},
- {"type": "array", "items": {"type": "string"}},
- {"type": "object", "$ref": "#/$defs/users_groups.user"}
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "object",
+ "$ref": "#/$defs/users_groups.user"
+ }
]
},
"minItems": 1
@@ -2356,7 +3054,10 @@
"type": "object",
"properties": {
"wireguard": {
- "type": ["null", "object"],
+ "type": [
+ "null",
+ "object"
+ ],
"properties": {
"interfaces": {
"type": "array",
@@ -2389,7 +3090,9 @@
"description": "List of shell commands to be executed as probes."
}
},
- "required": ["interfaces"],
+ "required": [
+ "interfaces"
+ ],
"minProperties": 1,
"additionalProperties": false
}
@@ -2402,7 +3105,9 @@
"type": "array",
"items": {
"type": "object",
- "required": ["path"],
+ "required": [
+ "path"
+ ],
"additionalProperties": false,
"properties": {
"path": {
@@ -2427,7 +3132,17 @@
"encoding": {
"type": "string",
"default": "text/plain",
- "enum": ["gz", "gzip", "gz+base64", "gzip+base64", "gz+b64", "gzip+b64", "b64", "base64", "text/plain"],
+ "enum": [
+ "gz",
+ "gzip",
+ "gz+base64",
+ "gzip+base64",
+ "gz+b64",
+ "gzip+b64",
+ "b64",
+ "base64",
+ "text/plain"
+ ],
"description": "Optional encoding type of the content. Default is ``text/plain`` and no content decoding is performed. Supported encoding types are: gz, gzip, gz+base64, gzip+base64, gz+b64, gzip+b64, b64, base64"
},
"append": {
@@ -2450,9 +3165,9 @@
"type": "object",
"properties": {
"yum_repo_dir": {
- "type": "string",
- "default": "/etc/yum.repos.d",
- "description": "The repo parts directory where individual yum repo config files will be written. Default: ``/etc/yum.repos.d``"
+ "type": "string",
+ "default": "/etc/yum.repos.d",
+ "description": "The repo parts directory where individual yum repo config files will be written. Default: ``/etc/yum.repos.d``"
},
"yum_repos": {
"type": "object",
@@ -2466,9 +3181,9 @@
"additionalProperties": false,
"properties": {
"baseurl": {
- "type": "string",
- "format": "uri",
- "description": "URL to the directory where the yum repository's 'repodata' directory lives"
+ "type": "string",
+ "format": "uri",
+ "description": "URL to the directory where the yum repository's 'repodata' directory lives"
},
"name": {
"type": "string",
@@ -2482,16 +3197,24 @@
},
"patternProperties": {
"^[0-9a-zA-Z_]+$": {
- "label": "<yum_config_option>",
- "oneOf": [
- {"type": "integer"},
- {"type": "boolean"},
- {"type": "string"}
- ],
- "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf"
+ "label": "<yum_config_option>",
+ "oneOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "description": "Any supported yum repository configuration options will be written to the yum repo config file. See: man yum.conf"
}
},
- "required": ["baseurl"]
+ "required": [
+ "baseurl"
+ ]
}
}
}
@@ -2549,36 +3272,55 @@
"oneOf": [
{
"additionalProperties": false,
- "required": ["type"],
+ "required": [
+ "type"
+ ],
"properties": {
"type": {
"type": "string",
- "enum": ["log"]
+ "enum": [
+ "log"
+ ]
},
"level": {
"type": "string",
- "enum": ["DEBUG", "INFO", "WARN", "ERROR", "FATAL"],
+ "enum": [
+ "DEBUG",
+ "INFO",
+ "WARN",
+ "ERROR",
+ "FATAL"
+ ],
"default": "DEBUG"
}
}
},
{
"additionalProperties": false,
- "required": ["type"],
+ "required": [
+ "type"
+ ],
"properties": {
"type": {
"type": "string",
- "enum": ["print"]
+ "enum": [
+ "print"
+ ]
}
}
},
{
"additionalProperties": false,
- "required": ["type", "endpoint"],
+ "required": [
+ "type",
+ "endpoint"
+ ],
"properties": {
"type": {
"type": "string",
- "enum": ["webhook"]
+ "enum": [
+ "webhook"
+ ]
},
"endpoint": {
"type": "string",
@@ -2615,11 +3357,15 @@
},
{
"additionalProperties": false,
- "required": ["type"],
+ "required": [
+ "type"
+ ],
"properties": {
"type": {
"type": "string",
- "enum": ["hyperv"]
+ "enum": [
+ "hyperv"
+ ]
},
"kvp_file_path": {
"type": "string",
@@ -2642,60 +3388,173 @@
}
},
"allOf": [
- { "$ref": "#/$defs/cc_ansible" },
- { "$ref": "#/$defs/cc_apk_configure" },
- { "$ref": "#/$defs/cc_apt_configure" },
- { "$ref": "#/$defs/cc_apt_pipelining" },
- { "$ref": "#/$defs/cc_ubuntu_autoinstall"},
- { "$ref": "#/$defs/cc_bootcmd" },
- { "$ref": "#/$defs/cc_byobu" },
- { "$ref": "#/$defs/cc_ca_certs" },
- { "$ref": "#/$defs/cc_chef" },
- { "$ref": "#/$defs/cc_disable_ec2_metadata" },
- { "$ref": "#/$defs/cc_disk_setup" },
- { "$ref": "#/$defs/cc_fan" },
- { "$ref": "#/$defs/cc_final_message"},
- { "$ref": "#/$defs/cc_growpart"},
- { "$ref": "#/$defs/cc_grub_dpkg"},
- { "$ref": "#/$defs/cc_install_hotplug"},
- { "$ref": "#/$defs/cc_keyboard" },
- { "$ref": "#/$defs/cc_keys_to_console" },
- { "$ref": "#/$defs/cc_landscape" },
- { "$ref": "#/$defs/cc_locale" },
- { "$ref": "#/$defs/cc_lxd" },
- { "$ref": "#/$defs/cc_mcollective" },
- { "$ref": "#/$defs/cc_migrator" },
- { "$ref": "#/$defs/cc_mounts" },
- { "$ref": "#/$defs/cc_ntp" },
- { "$ref": "#/$defs/cc_package_update_upgrade_install" },
- { "$ref": "#/$defs/cc_phone_home" },
- { "$ref": "#/$defs/cc_power_state_change"},
- { "$ref": "#/$defs/cc_puppet"},
- { "$ref": "#/$defs/cc_resizefs"},
- { "$ref": "#/$defs/cc_resolv_conf"},
- { "$ref": "#/$defs/cc_rh_subscription"},
- { "$ref": "#/$defs/cc_rsyslog"},
- { "$ref": "#/$defs/cc_runcmd"},
- { "$ref": "#/$defs/cc_salt_minion"},
- { "$ref": "#/$defs/cc_scripts_vendor"},
- { "$ref": "#/$defs/cc_seed_random"},
- { "$ref": "#/$defs/cc_set_hostname"},
- { "$ref": "#/$defs/cc_set_passwords"},
- { "$ref": "#/$defs/cc_snap"},
- { "$ref": "#/$defs/cc_spacewalk"},
- { "$ref": "#/$defs/cc_ssh_authkey_fingerprints"},
- { "$ref": "#/$defs/cc_ssh_import_id"},
- { "$ref": "#/$defs/cc_ssh"},
- { "$ref": "#/$defs/cc_timezone"},
- { "$ref": "#/$defs/cc_ubuntu_advantage"},
- { "$ref": "#/$defs/cc_ubuntu_drivers"},
- { "$ref": "#/$defs/cc_update_etc_hosts"},
- { "$ref": "#/$defs/cc_update_hostname"},
- { "$ref": "#/$defs/cc_users_groups"},
- { "$ref": "#/$defs/cc_wireguard"},
- { "$ref": "#/$defs/cc_write_files"},
- { "$ref": "#/$defs/cc_yum_add_repo"},
- { "$ref": "#/$defs/cc_zypper_add_repo"},
- { "$ref": "#/$defs/reporting_config"}
+ {
+ "$ref": "#/$defs/base_config"
+ },
+ {
+ "$ref": "#/$defs/cc_ansible"
+ },
+ {
+ "$ref": "#/$defs/cc_apk_configure"
+ },
+ {
+ "$ref": "#/$defs/cc_apt_configure"
+ },
+ {
+ "$ref": "#/$defs/cc_apt_pipelining"
+ },
+ {
+ "$ref": "#/$defs/cc_ubuntu_autoinstall"
+ },
+ {
+ "$ref": "#/$defs/cc_bootcmd"
+ },
+ {
+ "$ref": "#/$defs/cc_byobu"
+ },
+ {
+ "$ref": "#/$defs/cc_ca_certs"
+ },
+ {
+ "$ref": "#/$defs/cc_chef"
+ },
+ {
+ "$ref": "#/$defs/cc_disable_ec2_metadata"
+ },
+ {
+ "$ref": "#/$defs/cc_disk_setup"
+ },
+ {
+ "$ref": "#/$defs/cc_fan"
+ },
+ {
+ "$ref": "#/$defs/cc_final_message"
+ },
+ {
+ "$ref": "#/$defs/cc_growpart"
+ },
+ {
+ "$ref": "#/$defs/cc_grub_dpkg"
+ },
+ {
+ "$ref": "#/$defs/cc_install_hotplug"
+ },
+ {
+ "$ref": "#/$defs/cc_keyboard"
+ },
+ {
+ "$ref": "#/$defs/cc_keys_to_console"
+ },
+ {
+ "$ref": "#/$defs/cc_landscape"
+ },
+ {
+ "$ref": "#/$defs/cc_locale"
+ },
+ {
+ "$ref": "#/$defs/cc_lxd"
+ },
+ {
+ "$ref": "#/$defs/cc_mcollective"
+ },
+ {
+ "$ref": "#/$defs/cc_migrator"
+ },
+ {
+ "$ref": "#/$defs/cc_mounts"
+ },
+ {
+ "$ref": "#/$defs/cc_ntp"
+ },
+ {
+ "$ref": "#/$defs/cc_package_update_upgrade_install"
+ },
+ {
+ "$ref": "#/$defs/cc_phone_home"
+ },
+ {
+ "$ref": "#/$defs/cc_power_state_change"
+ },
+ {
+ "$ref": "#/$defs/cc_puppet"
+ },
+ {
+ "$ref": "#/$defs/cc_resizefs"
+ },
+ {
+ "$ref": "#/$defs/cc_resolv_conf"
+ },
+ {
+ "$ref": "#/$defs/cc_rh_subscription"
+ },
+ {
+ "$ref": "#/$defs/cc_rsyslog"
+ },
+ {
+ "$ref": "#/$defs/cc_runcmd"
+ },
+ {
+ "$ref": "#/$defs/cc_salt_minion"
+ },
+ {
+ "$ref": "#/$defs/cc_scripts_vendor"
+ },
+ {
+ "$ref": "#/$defs/cc_seed_random"
+ },
+ {
+ "$ref": "#/$defs/cc_set_hostname"
+ },
+ {
+ "$ref": "#/$defs/cc_set_passwords"
+ },
+ {
+ "$ref": "#/$defs/cc_snap"
+ },
+ {
+ "$ref": "#/$defs/cc_spacewalk"
+ },
+ {
+ "$ref": "#/$defs/cc_ssh_authkey_fingerprints"
+ },
+ {
+ "$ref": "#/$defs/cc_ssh_import_id"
+ },
+ {
+ "$ref": "#/$defs/cc_ssh"
+ },
+ {
+ "$ref": "#/$defs/cc_timezone"
+ },
+ {
+ "$ref": "#/$defs/cc_ubuntu_advantage"
+ },
+ {
+ "$ref": "#/$defs/cc_ubuntu_drivers"
+ },
+ {
+ "$ref": "#/$defs/cc_update_etc_hosts"
+ },
+ {
+ "$ref": "#/$defs/cc_update_hostname"
+ },
+ {
+ "$ref": "#/$defs/cc_users_groups"
+ },
+ {
+ "$ref": "#/$defs/cc_wireguard"
+ },
+ {
+ "$ref": "#/$defs/cc_write_files"
+ },
+ {
+ "$ref": "#/$defs/cc_yum_add_repo"
+ },
+ {
+ "$ref": "#/$defs/cc_zypper_add_repo"
+ },
+ {
+ "$ref": "#/$defs/reporting_config"
+ }
]
}
diff --git a/cloudinit/config/schemas/versions.schema.cloud-config.json b/cloudinit/config/schemas/versions.schema.cloud-config.json
index bca0a11e..fc0e0dec 100644
--- a/cloudinit/config/schemas/versions.schema.cloud-config.json
+++ b/cloudinit/config/schemas/versions.schema.cloud-config.json
@@ -7,11 +7,15 @@
{
"properties": {
"version": {
- "enum": [ "v1" ]
+ "enum": [
+ "v1"
+ ]
}
}
},
- {"$ref": "https://raw.githubusercontent.com/canonical/cloud-init/main/cloudinit/config/schemas/schema-cloud-config-v1.json"}
+ {
+ "$ref": "https://raw.githubusercontent.com/canonical/cloud-init/main/cloudinit/config/schemas/schema-cloud-config-v1.json"
+ }
]
}
]
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 4a468cf8..735a7832 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -20,15 +20,22 @@ from typing import Any, Mapping, MutableMapping, Optional, Type
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import net, persistence, ssh_util, subp, type_utils, util
+from cloudinit import (
+ net,
+ persistence,
+ ssh_util,
+ subp,
+ temp_utils,
+ type_utils,
+ util,
+)
+from cloudinit.distros.networking import LinuxNetworking, Networking
from cloudinit.distros.parsers import hosts
from cloudinit.features import ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES
from cloudinit.net import activators, eni, network_state, renderers
from cloudinit.net.network_state import parse_net_config_data
from cloudinit.net.renderer import Renderer
-from .networking import LinuxNetworking, Networking
-
# Used when a cloud-config module can be run on all cloud-init distributions.
# The value 'all' is surfaced in module documentation for distro support.
ALL_DISTROS = "all"
@@ -37,8 +44,10 @@ OSFAMILIES = {
"alpine": ["alpine"],
"arch": ["arch"],
"debian": ["debian", "ubuntu"],
- "freebsd": ["freebsd"],
- "gentoo": ["gentoo"],
+ "freebsd": ["freebsd", "dragonfly"],
+ "gentoo": ["gentoo", "cos"],
+ "netbsd": ["netbsd"],
+ "openbsd": ["openbsd"],
"redhat": [
"almalinux",
"amazon",
@@ -46,6 +55,7 @@ OSFAMILIES = {
"cloudlinux",
"eurolinux",
"fedora",
+ "mariner",
"miraclelinux",
"openEuler",
"openmandriva",
@@ -71,7 +81,7 @@ LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-"
class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
-
+ pip_package_name = "python3-pip"
usr_lib_exec = "/usr/lib"
hosts_fn = "/etc/hosts"
ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
@@ -90,6 +100,8 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
prefer_fqdn = False
resolve_conf_fn = "/etc/resolv.conf"
+ osfamily: str
+
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
@@ -909,6 +921,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
"stop": ["stop", service],
"start": ["start", service],
"enable": ["enable", service],
+ "disable": ["disable", service],
"restart": ["restart", service],
"reload": ["reload-or-restart", service],
"try-reload": ["reload-or-try-restart", service],
@@ -919,6 +932,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
"stop": [service, "stop"],
"start": [service, "start"],
"enable": [service, "start"],
+ "disable": [service, "stop"],
"restart": [service, "restart"],
"reload": [service, "restart"],
"try-reload": [service, "restart"],
@@ -942,6 +956,33 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
else:
raise NotImplementedError()
+ def get_tmp_exec_path(self) -> str:
+ tmp_dir = temp_utils.get_tmp_ancestor(needs_exe=True)
+ if not util.has_mount_opt(tmp_dir, "noexec"):
+ return tmp_dir
+ return os.path.join(self.usr_lib_exec, "cloud-init", "clouddir")
+
+ def do_as(self, command: list, user: str, cwd: str = "", **kwargs):
+ """
+ Perform a command as the requested user. Behaves like subp()
+
+        Note: We pass `PATH` to the user env by using `env`. This could
+        probably be simplified after bionic EOL by using
+ `su --whitelist-environment=PATH ...`, more info on:
+ https://lore.kernel.org/all/20180815110445.4qefy5zx5gfgbqly@ws.net.home/T/
+ """
+ directory = f"cd {cwd} && " if cwd else ""
+ return subp.subp(
+ [
+ "su",
+ "-",
+ user,
+ "-c",
+ directory + "env PATH=$PATH " + " ".join(command),
+ ],
+ **kwargs,
+ )
+
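A usage sketch for the new ``do_as`` helper; the user name and command are invented for illustration, and ``distro`` stands for any instantiated Distro subclass:

    distro.do_as(
        ["git", "clone", "https://example.com/repo.git"],
        user="ci-user",
        cwd="/home/ci-user",
    )
    # roughly equivalent to:
    #   su - ci-user -c 'cd /home/ci-user && env PATH=$PATH git clone ...'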
def _apply_hostname_transformations_to_url(url: str, transformations: list):
"""
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
index 3d7d4891..4a23fe07 100644
--- a/cloudinit/distros/alpine.py
+++ b/cloudinit/distros/alpine.py
@@ -21,7 +21,7 @@ NETWORK_FILE_HEADER = """\
class Distro(distros.Distro):
- init_cmd = ["rc-service"] # init scripts
+ pip_package_name = "py3-pip"
locale_conf_fn = "/etc/profile.d/locale.sh"
network_conf_fn = "/etc/network/interfaces"
renderer_configs = {
@@ -173,5 +173,33 @@ class Distro(distros.Distro):
return command
+ def uses_systemd(self):
+ """
+ Alpine uses OpenRC, not systemd
+ """
+ return False
-# vi: ts=4 expandtab
+ def manage_service(self, action: str, service: str):
+ """
+        Perform the requested action on a service. This handles
+        OpenRC-specific implementation details.
+
+        OpenRC has two distinct commands relating to services,
+        'rc-service' and 'rc-update', and the order of their argument
+        lists differs.
+ May raise ProcessExecutionError
+ """
+ init_cmd = ["rc-service", "--nocolor"]
+ update_cmd = ["rc-update", "--nocolor"]
+ cmds = {
+ "stop": list(init_cmd) + [service, "stop"],
+ "start": list(init_cmd) + [service, "start"],
+ "disable": list(update_cmd) + ["del", service],
+ "enable": list(update_cmd) + ["add", service],
+ "restart": list(init_cmd) + [service, "restart"],
+ "reload": list(init_cmd) + [service, "restart"],
+ "try-reload": list(init_cmd) + [service, "restart"],
+ "status": list(init_cmd) + [service, "status"],
+ }
+ cmd = list(cmds[action])
+ return subp.subp(cmd, capture=True)
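A short usage sketch of the OpenRC-aware mapping (the service name is an example, ``distro`` an alpine.Distro instance); note how 'enable' routes to rc-update while 'start' routes to rc-service:

    distro.manage_service("enable", "sshd")  # rc-update --nocolor add sshd
    distro.manage_service("start", "sshd")   # rc-service --nocolor sshd start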
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index 77e9bf11..c4e1e15c 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -5,8 +5,7 @@ from cloudinit import distros, helpers
from cloudinit import log as logging
from cloudinit import net, subp, util
from cloudinit.distros import bsd_utils
-
-from .networking import BSDNetworking
+from cloudinit.distros.networking import BSDNetworking
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/distros/cos.py b/cloudinit/distros/cos.py
new file mode 100644
index 00000000..fe439eb3
--- /dev/null
+++ b/cloudinit/distros/cos.py
@@ -0,0 +1,12 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import gentoo
+
+
+# Support for Container-Optimized OS
+# https://cloud.google.com/container-optimized-os/docs
+class Distro(gentoo.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index fa5c6616..b9fd37b8 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -11,10 +11,9 @@ from io import StringIO
import cloudinit.distros.bsd
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.distros.networking import FreeBSDNetworking
from cloudinit.settings import PER_INSTANCE
-from .networking import FreeBSDNetworking
-
LOG = logging.getLogger(__name__)
@@ -38,6 +37,27 @@ class Distro(cloudinit.distros.bsd.BSD):
prefer_fqdn = True # See rc.conf(5) in FreeBSD
home_dir = "/usr/home"
+ def manage_service(self, action: str, service: str):
+ """
+ Perform the requested action on a service. This handles FreeBSD's
+ 'service' case. The FreeBSD 'service' is closer in features to
+ 'systemctl' than SysV init's 'service', so we override it.
+ May raise ProcessExecutionError
+ """
+ init_cmd = self.init_cmd
+ cmds = {
+ "stop": [service, "stop"],
+ "start": [service, "start"],
+ "enable": [service, "enable"],
+ "disable": [service, "disable"],
+ "restart": [service, "restart"],
+ "reload": [service, "restart"],
+ "try-reload": [service, "restart"],
+ "status": [service, "status"],
+ }
+ cmd = list(init_cmd) + list(cmds[action])
+ return subp.subp(cmd, capture=True)
+
def _get_add_member_to_group_cmd(self, member_name, group_name):
return ["pw", "usermod", "-n", member_name, "-G", group_name]
@@ -157,7 +177,8 @@ class Distro(cloudinit.distros.bsd.BSD):
)
def _get_pkg_cmd_environ(self):
- """Return environment vars used in *BSD package_command operations"""
+ """Return environment vars used in FreeBSD package_command
+ operations"""
e = os.environ.copy()
e["ASSUME_ALWAYS_YES"] = "YES"
return e
diff --git a/cloudinit/distros/mariner.py b/cloudinit/distros/mariner.py
new file mode 100644
index 00000000..41b0dc75
--- /dev/null
+++ b/cloudinit/distros/mariner.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2021 VMware Inc.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit.distros import photon
+
+LOG = logging.getLogger(__name__)
+
+NETWORK_FILE_HEADER = """\
+# This file is generated from information provided by the datasource. Changes
+# to it will not persist across an instance reboot. To disable cloud-init's
+# network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+"""
+
+
+class Distro(photon.Distro):
+ systemd_hostname_conf_fn = "/etc/hostname"
+ network_conf_dir = "/etc/systemd/network/"
+ systemd_locale_conf_fn = "/etc/locale.conf"
+ resolve_conf_fn = "/etc/systemd/resolved.conf"
+
+ network_conf_fn = {"netplan": "/etc/netplan/50-cloud-init.yaml"}
+ renderer_configs = {
+ "networkd": {
+ "resolv_conf_fn": resolve_conf_fn,
+ "network_conf_dir": network_conf_dir,
+ },
+ "netplan": {
+ "netplan_path": network_conf_fn["netplan"],
+ "netplan_header": NETWORK_FILE_HEADER,
+ "postcmds": "True",
+ },
+ }
+
+ # Should be fqdn if we can use it
+ prefer_fqdn = True
+
+ def __init__(self, name, cfg, paths):
+ photon.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+        # calls from repeatedly happening (when they
+        # should only happen, say, once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = "mariner"
+ self.init_cmd = ["systemctl"]
+
+ def _get_localhost_ip(self):
+ return "127.0.0.1"
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
index f14d678d..7edfe965 100644
--- a/cloudinit/distros/networking.py
+++ b/cloudinit/distros/networking.py
@@ -4,6 +4,7 @@ import os
from typing import List, Optional
from cloudinit import net, subp, util
+from cloudinit.distros.parsers import ifconfig
LOG = logging.getLogger(__name__)
@@ -185,17 +186,45 @@ class Networking(metaclass=abc.ABCMeta):
class BSDNetworking(Networking):
"""Implementation of networking functionality shared across BSDs."""
+ def __init__(self):
+ self.ifc = ifconfig.Ifconfig()
+ self.ifs = {}
+ self._update_ifs()
+
+ def _update_ifs(self):
+ ifconf = subp.subp(["ifconfig", "-a"])
+ # ``ifconfig -a`` always returns at least ``lo0``.
+ # So this ``if`` is really just to make testing/mocking easier
+ if ifconf[0]:
+ self.ifs = self.ifc.parse(ifconf[0])
+
def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
LOG.debug("Cannot rename network interface.")
def is_physical(self, devname: DeviceName) -> bool:
- raise NotImplementedError()
+ return self.ifs[devname].is_physical
+
+ def is_bond(self, devname: DeviceName) -> bool:
+ return self.ifs[devname].is_bond
+
+ def is_bridge(self, devname: DeviceName) -> bool:
+ return self.ifs[devname].is_bridge
+
+ def is_vlan(self, devname: DeviceName) -> bool:
+ return self.ifs[devname].is_vlan
+
+ def is_up(self, devname: DeviceName) -> bool:
+ return self.ifs[devname].up
def settle(self, *, exists=None) -> None:
"""BSD has no equivalent to `udevadm settle`; noop."""
def try_set_link_up(self, devname: DeviceName) -> bool:
- raise NotImplementedError()
+        """Try to set the link up explicitly and return whether it is up.
+ Not guaranteed to bring the interface up. The caller is expected to
+ add wait times before retrying."""
+ subp.subp(["ifconfig", devname, "up"])
+ return self.is_up(devname)
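As the docstring notes, ``try_set_link_up`` does not retry. A caller-side loop might look like this sketch (the function name and timings are invented):

    import time

    def wait_link_up(networking, devname, attempts=5, delay=1.0):
        # networking is a BSDNetworking instance; the caller supplies the waits
        for _ in range(attempts):
            if networking.try_set_link_up(devname):
                return True
            time.sleep(delay)
        return False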
class FreeBSDNetworking(BSDNetworking):
@@ -206,6 +235,30 @@ class FreeBSDNetworking(BSDNetworking):
# FreeBSD network script will rename the interface automatically.
pass
+ def is_renamed(self, devname: DeviceName) -> bool:
+ if not self.ifs[devname].is_physical:
+ # Only physical devices can be renamed.
+            # Cloned devices can be given any arbitrary name, so renaming
+            # makes no sense for them anyway.
+ return False
+
+ # check that `devinfo -p devname` returns the driver chain:
+ # $ devinfo -p em0
+ # => em0 pci0 pcib0 acpi0 nexus0
+ # if it doesn't, we know something's up:
+ # $ devinfo -p eth0
+ # => devinfo: eth0: Not found
+
+        # We could catch exit codes here and check whether they are 0
+        # (success: not renamed) or 1 (failure: renamed), instead of
+        # ripping through the stack with an exception.
+        # Unfortunately, subp doesn't return exit codes,
+        # so we do the next best thing and compare the output.
+ _, err = subp.subp(["devinfo", "-p", devname], rcs=[0, 1])
+ if err == "devinfo: {}: Not found\n".format(devname):
+ return True
+ return False
+
class LinuxNetworking(Networking):
"""Implementation of networking functionality common to Linux distros."""
diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
index ccdb8799..72e9bc45 100644
--- a/cloudinit/distros/openbsd.py
+++ b/cloudinit/distros/openbsd.py
@@ -3,7 +3,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-import platform
import cloudinit.distros.netbsd
from cloudinit import log as logging
@@ -14,6 +13,7 @@ LOG = logging.getLogger(__name__)
class Distro(cloudinit.distros.netbsd.NetBSD):
hostname_conf_fn = "/etc/myname"
+ init_cmd = ["rcctl"]
def _read_hostname(self, filename, default=None):
return util.load_file(self.hostname_conf_fn)
@@ -25,6 +25,26 @@ class Distro(cloudinit.distros.netbsd.NetBSD):
def _get_add_member_to_group_cmd(self, member_name, group_name):
return ["usermod", "-G", group_name, member_name]
+ def manage_service(self, action: str, service: str):
+ """
+ Perform the requested action on a service. This handles OpenBSD's
+ 'rcctl'.
+        May raise ProcessExecutionError.
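+        e.g. ``manage_service("restart", "sshd")`` runs ``rcctl restart sshd``.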
+ """
+ init_cmd = self.init_cmd
+ cmds = {
+ "stop": ["stop", service],
+ "start": ["start", service],
+ "enable": ["enable", service],
+ "disable": ["disable", service],
+ "restart": ["restart", service],
+ "reload": ["restart", service],
+ "try-reload": ["restart", service],
+ "status": ["check", service],
+ }
+ cmd = list(init_cmd) + list(cmds[action])
+ return subp.subp(cmd, capture=True)
+
def lock_passwd(self, name):
try:
subp.subp(["usermod", "-p", "*", name])
@@ -37,13 +57,7 @@ class Distro(cloudinit.distros.netbsd.NetBSD):
def _get_pkg_cmd_environ(self):
"""Return env vars used in OpenBSD package_command operations"""
- os_release = platform.release()
- os_arch = platform.machine()
e = os.environ.copy()
- e["PKG_PATH"] = (
- "ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/"
- "packages/{os_arch}/"
- ).format(os_arch=os_arch, os_release=os_release)
return e
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index 61674082..1d44174e 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -10,7 +10,7 @@ from cloudinit.distros.parsers import chop_comment
# Parser that knows how to work with /etc/hostname format
-class HostnameConf(object):
+class HostnameConf:
def __init__(self, text):
self._text = text
self._contents = None
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index e43880af..7c5febe7 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -12,7 +12,7 @@ from cloudinit.distros.parsers import chop_comment
# See: man hosts
# or https://linux.die.net/man/5/hosts
# or https://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/configtuning-configfiles.html # noqa
-class HostsConf(object):
+class HostsConf:
def __init__(self, text):
self._text = text
self._contents = None
diff --git a/cloudinit/distros/parsers/ifconfig.py b/cloudinit/distros/parsers/ifconfig.py
new file mode 100644
index 00000000..35f728e0
--- /dev/null
+++ b/cloudinit/distros/parsers/ifconfig.py
@@ -0,0 +1,277 @@
+# Copyright(C) 2022 FreeBSD Foundation
+#
+# Author: Mina Galić <me+FreeBSD@igalic.co>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import re
+from functools import lru_cache
+from ipaddress import IPv4Address, IPv4Interface, IPv6Interface
+from typing import Dict, Optional, Tuple
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
+MAC_RE = r"""([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}"""
+
+
+class Ifstate:
+ """
+ This class holds the parsed state of a BSD network interface.
+ It is itself side-effect free.
+ All methods with side-effects should be implemented on one of the
+ ``BSDNetworking`` classes.
+ """
+
+ def __init__(self, name):
+ self.name = name
+ self.index: int = 0
+ self.inet = {}
+ self.inet6 = {}
+ self.up = False
+ self.options = []
+ self.nd6 = []
+ self.flags = []
+ self.mtu: int = 0
+ self.metric: int = 0
+ self.groups = []
+ self.description: Optional[str] = None
+ self.media: Optional[str] = None
+ self.status: Optional[str] = None
+ self.mac: Optional[str] = None
+ self.macs = []
+ self.vlan = {}
+ self.members = []
+
+ @property
+ def is_loopback(self) -> bool:
+ return "loopback" in self.flags or "lo" in self.groups
+
+ @property
+ def is_physical(self) -> bool:
+ # OpenBSD makes this very easy:
+ if "egress" in self.groups:
+ return True
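+        # On FreeBSD, physical interfaces typically carry no groups but
+        # report Ethernet media: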
+ if self.groups == [] and self.media and "Ethernet" in self.media:
+ return True
+ return False
+
+ @property
+ def is_bridge(self) -> bool:
+ return "bridge" in self.groups
+
+ @property
+ def is_bond(self) -> bool:
+ return "lagg" in self.groups
+
+ @property
+ def is_vlan(self) -> bool:
+ return ("vlan" in self.groups) or (self.vlan != {})
+
+ @property
+ def is_wlan(self) -> bool:
+ return "wlan" in self.groups
+
+
+class Ifconfig:
+ """
+    A parser for BSD-style ``ifconfig(8)`` output.
+    For documentation, see:
+ - https://man.freebsd.org/ifconfig(8)
+ - https://man.netbsd.org/ifconfig.8
+ - https://man.openbsd.org/ifconfig.8
+    Output from all variants is parsed alike into a single form:
+ an ``Ifstate`` object.
+ """
+
+ def __init__(self):
+ self._ifs = {}
+
+ @lru_cache()
+ def parse(self, text: str) -> Dict[str, Ifstate]:
+ """
+ Parse the ``ifconfig -a`` output ``text``, into a dict of ``Ifstate``
+ objects, referenced by ``name`` *and* by ``mac`` address.
+
+ This dict will always be the same, given the same input, so we can
+ ``@lru_cache()`` it. n.b.: ``@lru_cache()`` takes only the
+ ``__hash__()`` of the input (``text``), so it should be fairly quick,
+ despite our giant inputs.
+
+ @param text: The output of ``ifconfig -a``
+ @returns: A dict of ``Ifstate``s, referenced by ``name`` and ``mac``
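+
+        A sketch of intended use, assuming ``text`` came from
+        ``ifconfig -a`` on a machine with an ``em0`` device:
+            >>> ifs = Ifconfig().parse(text)
+            >>> ifs["em0"].up
+            True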
+ """
+ ifindex = 0
+ for line in text.splitlines():
+ if len(line) == 0:
+ continue
+ if line[0] not in ("\t", " "):
+ # We hit the start of a device block in the ifconfig output
+ # These start with devN: flags=NNNN<flags> and then continue
+ # *indented* for the rest of the config.
+ # Here our loop resets ``curif`` & ``dev`` to this new device
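+                # e.g. on FreeBSD a block starts like:
+                #   em0: flags=8843<UP,BROADCAST,...> metric 0 mtu 1500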
+ ifindex += 1
+ curif = line.split()[0]
+                # current ifconfig appends a ':' to the device name
+ if curif.endswith(":"):
+ curif = curif[:-1]
+ dev = Ifstate(curif)
+ dev.index = ifindex
+ self._ifs[curif] = dev
+
+ toks = line.lower().strip().split()
+
+ if len(toks) > 1 and toks[1].startswith("flags="):
+ flags = self._parse_flags(toks)
+ if flags != {}:
+ dev.flags = copy.deepcopy(flags["flags"])
+ dev.up = flags["up"]
+ if "mtu" in flags:
+ dev.mtu = flags["mtu"]
+ if "metric" in flags:
+ dev.metric = flags["metric"]
+ if toks[0].startswith("capabilities="):
+ caps = re.split(r"<|>", toks[0])
+ dev.flags.append(caps)
+
+ if toks[0] == "index":
+                # We found a real index! Override our fake one.
+ dev.index = int(toks[1])
+
+ if toks[0] == "description:":
+ dev.description = line[line.index(":") + 2 :]
+
+ if (
+ toks[0].startswith("options=")
+ or toks[0].startswith("ec_capabilities")
+ or toks[0].startswith("ec_enabled")
+ ):
+ options = re.split(r"<|>", toks[0])
+ if len(options) > 1:
+ dev.options += options[1].split(",")
+
+            # We also store the Ifstate reference under all mac addresses
+            # so we can reverse-find it more easily.
+ if toks[0] == "ether":
+ dev.mac = toks[1]
+ dev.macs.append(toks[1])
+ self._ifs[toks[1]] = dev
+ if toks[0] == "hwaddr":
+ dev.macs.append(toks[1])
+ self._ifs[toks[1]] = dev
+
+ if toks[0] == "groups:":
+ dev.groups += toks[1:]
+
+ if toks[0] == "media:":
+ dev.media = line[line.index(": ") + 2 :]
+
+ if toks[0] == "nd6":
+                nd6_opts = re.split(r"<|>", toks[1])
+ if len(nd6_opts) > 1:
+ dev.nd6 = nd6_opts[1].split(",")
+
+ if toks[0] == "status":
+ dev.status = toks[1]
+
+ if toks[0] == "inet":
+ ip = self._parse_inet(toks)
+ dev.inet[ip[0]] = copy.deepcopy(ip[1])
+
+ if toks[0] == "inet6":
+ ip = self._parse_inet6(toks)
+ dev.inet6[ip[0]] = copy.deepcopy(ip[1])
+
+            # bridge members and lagg ports are close enough to share a list
+            if toks[0] == "member:" or toks[0] == "laggport:":
+                dev.members.append(toks[1])
+
+ if toks[0] == "vlan:":
+ dev.vlan = {}
+ dev.vlan["id"] = toks[1]
+ for i in range(2, len(toks)):
+ if toks[i] == "interface:":
+ dev.vlan["link"] = toks[i + 1]
+
+ return self._ifs
+
+ def ifs_by_name(self):
+ return {
+ k: v for (k, v) in self._ifs.items() if not re.fullmatch(MAC_RE, k)
+ }
+
+ def ifs_by_mac(self):
+ return {
+ k: v for (k, v) in self._ifs.items() if re.fullmatch(MAC_RE, k)
+ }
+
+ def _parse_inet(self, toks: list) -> Tuple[str, dict]:
+ broadcast = None
+ if "/" in toks[1]:
+ ip = IPv4Interface(toks[1])
+ netmask = str(ip.netmask)
+ if "broadcast" in toks:
+ broadcast = toks[toks.index("broadcast") + 1]
+ else:
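+            # Classic BSD output, e.g.:
+            #   inet 10.0.0.1 netmask 0xffffff00 broadcast 10.0.0.255
+            # ``int(toks[3], 0)`` turns the hex netmask into an integer.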
+ netmask = str(IPv4Address(int(toks[3], 0)))
+ if "broadcast" in toks:
+ broadcast = toks[toks.index("broadcast") + 1]
+ ip = IPv4Interface("%s/%s" % (toks[1], netmask))
+
+ prefixlen = ip.with_prefixlen.split("/")[1]
+ return (
+ str(ip.ip),
+ {
+ "netmask": netmask,
+ "broadcast": broadcast,
+ "prefixlen": prefixlen,
+ },
+ )
+
+ def _get_prefixlen(self, toks):
+ for i in range(2, len(toks)):
+ if toks[i] == "prefixlen":
+ return toks[i + 1]
+
+ def _parse_inet6(self, toks: list) -> Tuple[str, dict]:
+ scope = None
+ # workaround https://github.com/python/cpython/issues/78969
+ if "%" in toks[1]:
+ scope = "link-local"
+ ip6, rest = toks[1].split("%")
+ if "/" in rest:
+ prefixlen = rest.split("/")[1]
+ else:
+ prefixlen = self._get_prefixlen(toks)
+ ip = IPv6Interface("%s/%s" % (ip6, prefixlen))
+ elif "/" in toks[1]:
+ ip = IPv6Interface(toks[1])
+ prefixlen = toks[1].split("/")[1]
+ else:
+ prefixlen = self._get_prefixlen(toks)
+ ip = IPv6Interface("%s/%s" % (toks[1], prefixlen))
+
+ if not scope and ip.is_link_local:
+ scope = "link-local"
+ elif not scope and ip.is_site_local:
+ scope = "site-local"
+
+ return (str(ip.ip), {"prefixlen": prefixlen, "scope": scope})
+
+ def _parse_flags(self, toks: list) -> dict:
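+        # ``toks`` is the lowercased, whitespace-split flags line, e.g.:
+        #   ["em0:", "flags=8843<up,broadcast,...>", "metric", "0",
+        #    "mtu", "1500"]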
+ flags = re.split(r"<|>", toks[1])
+ ret = {}
+ if len(flags) > 1:
+ ret["flags"] = flags[1].split(",")
+ if "up" in ret["flags"]:
+ ret["up"] = True
+ else:
+ ret["up"] = False
+ for t in range(2, len(toks)):
+ if toks[t] == "metric":
+ ret["metric"] = int(toks[t + 1])
+ elif toks[t] == "mtu":
+ ret["mtu"] = int(toks[t + 1])
+ return ret
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index c2bed1bf..d31ffeb1 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -14,7 +14,7 @@ LOG = logging.getLogger(__name__)
# See: man resolv.conf
-class ResolvConf(object):
+class ResolvConf:
def __init__(self, text):
self._text = text
self._contents = None
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
index 5aeb0a17..a58ea7d4 100644
--- a/cloudinit/filters/launch_index.py
+++ b/cloudinit/filters/launch_index.py
@@ -17,7 +17,7 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-class Filter(object):
+class Filter:
def __init__(self, wanted_idx, allow_none=True):
self.wanted_idx = wanted_idx
self.allow_none = allow_none
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index b8196cb1..cf849203 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -8,9 +8,13 @@ from typing import Optional, Type
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit.helpers import Paths
from cloudinit.settings import PER_ALWAYS
-from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
-from cloudinit.templater import MISSING_JINJA_PREFIX, render_string
+from cloudinit.templater import (
+ MISSING_JINJA_PREFIX,
+ detect_template,
+ render_string,
+)
from cloudinit.util import b64d, json_dumps, load_file, load_json
JUndefinedError: Type[Exception]
@@ -25,11 +29,19 @@ except ImportError:
LOG = logging.getLogger(__name__)
+class JinjaLoadError(Exception):
+ pass
+
+
+class NotJinjaError(Exception):
+ pass
+
+
class JinjaTemplatePartHandler(handlers.Handler):
prefixes = ["## template: jinja"]
- def __init__(self, paths, **_kwargs):
+ def __init__(self, paths: Paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.paths = paths
self.sub_handlers = {}
@@ -40,9 +52,7 @@ class JinjaTemplatePartHandler(handlers.Handler):
def handle_part(self, data, ctype, filename, payload, frequency, headers):
if ctype in handlers.CONTENT_SIGNALS:
return
- jinja_json_file = os.path.join(
- self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
- )
+ jinja_json_file = self.paths.get_runpath("instance_data_sensitive")
rendered_payload = render_jinja_payload_from_file(
payload, filename, jinja_json_file
)
@@ -84,21 +94,26 @@ def render_jinja_payload_from_file(
@return: A string of jinja-rendered content with the jinja header removed.
Returns None on error.
"""
+ if detect_template(payload)[0] != "jinja":
+ raise NotJinjaError("Payload is not a jinja template")
instance_data = {}
rendered_payload = None
if not os.path.exists(instance_data_file):
- raise RuntimeError(
+ raise JinjaLoadError(
"Cannot render jinja template vars. Instance data not yet"
" present at %s" % instance_data_file
)
try:
instance_data = load_json(load_file(instance_data_file))
- except (IOError, OSError) as e:
- if e.errno == EACCES:
- raise RuntimeError(
- "Cannot render jinja template vars. No read permission on"
- " '%s'. Try sudo" % instance_data_file
- ) from e
+ except Exception as e:
+ msg = "Loading Jinja instance data failed"
+ if isinstance(e, (IOError, OSError)):
+ if e.errno == EACCES:
+ msg = (
+ "Cannot render jinja template vars. No read permission on"
+ " '%s'. Try sudo" % instance_data_file
+ )
+ raise JinjaLoadError(msg) from e
rendered_payload = render_jinja_payload(
payload, payload_fn, instance_data, debug
@@ -192,6 +207,3 @@ def convert_jinja_instance_data(
if alias_name:
result[alias_name] = copy.deepcopy(result[key])
return result
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 406d4582..3e90f07d 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -25,11 +25,11 @@ class LockFailure(Exception):
pass
-class DummyLock(object):
+class DummyLock:
pass
-class DummySemaphores(object):
+class DummySemaphores:
def __init__(self):
pass
@@ -47,7 +47,7 @@ class DummySemaphores(object):
pass
-class FileLock(object):
+class FileLock:
def __init__(self, fn):
self.fn = fn
@@ -59,7 +59,7 @@ def canon_sem_name(name):
return name.replace("-", "_")
-class FileSemaphores(object):
+class FileSemaphores:
def __init__(self, sem_path):
self.sem_path = sem_path
@@ -141,7 +141,7 @@ class FileSemaphores(object):
return os.path.join(sem_path, "%s.%s" % (name, freq))
-class Runners(object):
+class Runners:
def __init__(self, paths):
self.paths = paths
self.sems = {}
@@ -186,7 +186,7 @@ class Runners(object):
return (True, results)
-class ConfigMerger(object):
+class ConfigMerger:
def __init__(
self,
paths=None,
@@ -292,7 +292,7 @@ class ConfigMerger(object):
return self._cfg
-class ContentHandlers(object):
+class ContentHandlers:
def __init__(self):
self.registered = {}
self.initialized = []
@@ -333,33 +333,44 @@ class Paths(persistence.CloudInitPickleMixin):
def __init__(self, path_cfgs: dict, ds=None):
self.cfgs = path_cfgs
# Populate all the initial paths
- self.cloud_dir = path_cfgs.get("cloud_dir", "/var/lib/cloud")
- self.run_dir = path_cfgs.get("run_dir", "/run/cloud-init")
- self.instance_link = os.path.join(self.cloud_dir, "instance")
- self.boot_finished = os.path.join(self.instance_link, "boot-finished")
- self.seed_dir = os.path.join(self.cloud_dir, "seed")
+ self.cloud_dir: str = path_cfgs.get("cloud_dir", "/var/lib/cloud")
+ self.run_dir: str = path_cfgs.get("run_dir", "/run/cloud-init")
+ self.instance_link: str = os.path.join(self.cloud_dir, "instance")
+ self.boot_finished: str = os.path.join(
+ self.instance_link, "boot-finished"
+ )
+ self.seed_dir: str = os.path.join(self.cloud_dir, "seed")
# This one isn't joined, since it should just be read-only
- template_dir = path_cfgs.get("templates_dir", "/etc/cloud/templates/")
- self.template_tpl = os.path.join(template_dir, "%s.tmpl")
+ template_dir: str = path_cfgs.get(
+ "templates_dir", "/etc/cloud/templates/"
+ )
+ self.template_tpl: str = os.path.join(template_dir, "%s.tmpl")
self.lookups = {
+ "boothooks": "boothooks",
+ "cloud_config": "cloud-config.txt",
+ "data": "data",
"handlers": "handlers",
+            # File in which publicly available instance meta-data is
+            # written; security-sensitive key values are redacted from
+            # this world-readable file
+ "instance_data": "instance-data.json",
+ # security-sensitive key values are present in this root-readable
+ # file
+ "instance_data_sensitive": "instance-data-sensitive.json",
+ "instance_id": ".instance-id",
+ "manual_clean_marker": "manual-clean",
+ "obj_pkl": "obj.pkl",
"scripts": "scripts",
- "vendor_scripts": "scripts/vendor",
"sem": "sem",
- "boothooks": "boothooks",
- "userdata_raw": "user-data.txt",
"userdata": "user-data.txt.i",
- "obj_pkl": "obj.pkl",
- "cloud_config": "cloud-config.txt",
- "vendor_cloud_config": "vendor-cloud-config.txt",
- "vendor2_cloud_config": "vendor2-cloud-config.txt",
- "data": "data",
- "vendordata_raw": "vendor-data.txt",
- "vendordata2_raw": "vendor-data2.txt",
+ "userdata_raw": "user-data.txt",
"vendordata": "vendor-data.txt.i",
"vendordata2": "vendor-data2.txt.i",
- "instance_id": ".instance-id",
- "manual_clean_marker": "manual-clean",
+ "vendordata2_raw": "vendor-data2.txt",
+ "vendordata_raw": "vendor-data.txt",
+ "vendor2_cloud_config": "vendor2-cloud-config.txt",
+ "vendor_cloud_config": "vendor-cloud-config.txt",
+ "vendor_scripts": "scripts/vendor",
"warnings": "warnings",
}
# Set when a datasource becomes active
@@ -376,6 +387,12 @@ class Paths(persistence.CloudInitPickleMixin):
self.run_dir = Paths(
path_cfgs=self.cfgs, ds=self.datasource
).run_dir
+ if "instance_data" not in self.lookups:
+ self.lookups["instance_data"] = "instance-data.json"
+ if "instance_data_sensitive" not in self.lookups:
+ self.lookups[
+ "instance_data_sensitive"
+ ] = "instance-data-sensitive.json"
# get_ipath_cur: get the current instance path for an item
def get_ipath_cur(self, name=None):
@@ -415,7 +432,7 @@ class Paths(persistence.CloudInitPickleMixin):
else:
return ipath
- def _get_path(self, base, name=None):
+ def _get_path(self, base: str, name=None):
if name is None:
return base
return os.path.join(base, self.lookups[name])
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index a7a6a47f..87899305 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -15,7 +15,7 @@ MERGER_PREFIX = "m_"
MERGER_ATTR = "Merger"
-class UnknownMerger(object):
+class UnknownMerger:
# Named differently so auto-method finding
# doesn't pick this up if there is ever a type
# named "unknown"
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index 274ccafc..e47d33f1 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -18,7 +18,7 @@ def _has_any(what, *keys):
return False
-class Merger(object):
+class Merger:
def __init__(self, merger, opts):
self._merger = merger
# Affects merging behavior...
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index 9dfae8cd..763b64b2 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -15,7 +15,7 @@ def _has_any(what, *keys):
return False
-class Merger(object):
+class Merger:
def __init__(self, merger, opts):
self._merger = merger
# Affects merging behavior...
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index a96bae5e..ae91c048 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -5,7 +5,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class Merger(object):
+class Merger:
def __init__(self, _merger, opts):
self._append = "append" in opts
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index e0f18366..e8778d27 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -7,10 +7,9 @@ from cloudinit import log as logging
from cloudinit import net, subp, util
from cloudinit.distros import bsd_utils
from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit.net import renderer
from cloudinit.net.network_state import NetworkState
-from . import renderer
-
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index eab86d9f..5df2f214 100644
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -15,8 +15,7 @@ import os
import shlex
from cloudinit import util
-
-from . import get_devicelist, read_sys_net_safe
+from cloudinit.net import get_devicelist, read_sys_net_safe
_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface"
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index fd1d4256..a9a1c980 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -4,6 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import contextlib
import logging
import os
import re
@@ -13,7 +14,7 @@ from io import StringIO
import configobj
-from cloudinit import subp, temp_utils, util
+from cloudinit import subp, util
from cloudinit.net import find_fallback_nic, get_devicelist
LOG = logging.getLogger(__name__)
@@ -26,7 +27,7 @@ class NoDHCPLeaseError(Exception):
class InvalidDHCPLeaseFileError(NoDHCPLeaseError):
- """Raised when parsing an empty or invalid dhcp.leases file.
+ """Raised when parsing an empty or invalid dhclient.lease file.
Current uses are DataSourceAzure and DataSourceEc2 during ephemeral
boot to scrape metadata.
@@ -41,7 +42,7 @@ class NoDHCPLeaseMissingDhclientError(NoDHCPLeaseError):
"""Raised when unable to find dhclient."""
-def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
+def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None, tmp_dir=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
If the nic is invalid or undiscoverable or dhclient command is not found,
@@ -50,6 +51,7 @@ def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
@param nic: Name of the network interface we want to run dhclient on.
@param dhcp_log_func: A callable accepting the dhclient output and error
streams.
+ @param tmp_dir: Tmp dir with exec permissions.
@return: A list of dicts representing dhcp options for each lease obtained
from the dhclient discovery if run, otherwise an empty list is
returned.
@@ -68,11 +70,7 @@ def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
if not dhclient_path:
LOG.debug("Skip dhclient configuration: No dhclient command found.")
raise NoDHCPLeaseMissingDhclientError()
- with temp_utils.tempdir(
- rmtree_ignore_errors=True, prefix="cloud-init-dhcp-", needs_exe=True
- ) as tdir:
- # Use /var/tmp because /run/cloud-init/tmp is mounted noexec
- return dhcp_discovery(dhclient_path, nic, tdir, dhcp_log_func)
+ return dhcp_discovery(dhclient_path, nic, dhcp_log_func)
def parse_dhcp_lease_file(lease_file):
@@ -109,36 +107,30 @@ def parse_dhcp_lease_file(lease_file):
return dhcp_leases
-def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
+def dhcp_discovery(dhclient_cmd_path, interface, dhcp_log_func=None):
"""Run dhclient on the interface without scripts or filesystem artifacts.
@param dhclient_cmd_path: Full path to the dhclient used.
- @param interface: Name of the network inteface on which to dhclient.
- @param cleandir: The directory from which to run dhclient as well as store
- dhcp leases.
+ @param interface: Name of the network interface on which to dhclient.
@param dhcp_log_func: A callable accepting the dhclient output and error
streams.
@return: A list of dicts of representing the dhcp leases parsed from the
- dhcp.leases file or empty list.
+ dhclient.lease file or empty list.
"""
LOG.debug("Performing a dhcp discovery on %s", interface)
- # XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
- # app armor profiles which disallow running dhclient -sf <our-script-file>.
    # We want to avoid running /sbin/dhclient-script because of side-effects
    # in /etc/resolv.conf and any other vendor-specific scripts in
    # /etc/dhcp/dhclient*hooks.d.
- sandbox_dhclient_cmd = os.path.join(cleandir, "dhclient")
- util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
- pid_file = os.path.join(cleandir, "dhclient.pid")
- lease_file = os.path.join(cleandir, "dhcp.leases")
+ pid_file = "/run/dhclient.pid"
+ lease_file = "/run/dhclient.lease"
- # In some cases files in /var/tmp may not be executable, launching dhclient
- # from there will certainly raise 'Permission denied' error. Try launching
- # the original dhclient instead.
- if not os.access(sandbox_dhclient_cmd, os.X_OK):
- sandbox_dhclient_cmd = dhclient_cmd_path
+    # This function waits for these files to exist; clean up previous runs
+    # to avoid a false positive in wait_for_files. Suppress each removal
+    # separately so a missing pid file doesn't mask a stale lease file.
+    with contextlib.suppress(FileNotFoundError):
+        os.remove(pid_file)
+    with contextlib.suppress(FileNotFoundError):
+        os.remove(lease_file)
# ISC dhclient needs the interface up to send initial discovery packets.
# Generally dhclient relies on dhclient-script PREINIT action to bring the
@@ -146,7 +138,7 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
# we need to do that "link up" ourselves first.
subp.subp(["ip", "link", "set", "dev", interface, "up"], capture=True)
cmd = [
- sandbox_dhclient_cmd,
+ dhclient_cmd_path,
"-1",
"-v",
"-lf",
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index ea0b8e4a..53bd35ca 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -8,11 +8,9 @@ from typing import Optional
from cloudinit import log as logging
from cloudinit import subp, util
-from cloudinit.net import subnet_is_ipv6
+from cloudinit.net import ParserError, renderer, subnet_is_ipv6
from cloudinit.net.network_state import NetworkState
-from . import ParserError, renderer
-
LOG = logging.getLogger(__name__)
NET_CONFIG_COMMANDS = [
diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py
index 81f7079f..5ce41694 100644
--- a/cloudinit/net/ephemeral.py
+++ b/cloudinit/net/ephemeral.py
@@ -17,7 +17,7 @@ from cloudinit.net.dhcp import (
LOG = logging.getLogger(__name__)
-class EphemeralIPv4Network(object):
+class EphemeralIPv4Network:
"""Context manager which sets up temporary static network configuration.
No operations are performed if the provided interface already has the
@@ -309,18 +309,20 @@ class EphemeralIPv6Network:
"""No need to set the link to down state"""
-class EphemeralDHCPv4(object):
+class EphemeralDHCPv4:
def __init__(
self,
iface=None,
connectivity_url_data: Dict[str, Any] = None,
dhcp_log_func=None,
+ tmp_dir=None,
):
self.iface = iface
self._ephipv4 = None
self.lease = None
self.dhcp_log_func = dhcp_log_func
self.connectivity_url_data = connectivity_url_data
+ self.tmp_dir = tmp_dir
def __enter__(self):
"""Setup sandboxed dhcp context, unless connectivity_url can already be
@@ -358,7 +360,9 @@ class EphemeralDHCPv4(object):
"""
if self.lease:
return self.lease
- leases = maybe_perform_dhcp_discovery(self.iface, self.dhcp_log_func)
+ leases = maybe_perform_dhcp_discovery(
+ self.iface, self.dhcp_log_func, self.tmp_dir
+ )
if not leases:
raise NoDHCPLeaseError()
self.lease = leases[-1]
@@ -417,19 +421,28 @@ class EphemeralDHCPv4(object):
class EphemeralIPNetwork:
"""Marries together IPv4 and IPv6 ephemeral context managers"""
- def __init__(self, interface, ipv6: bool = False, ipv4: bool = True):
+ def __init__(
+ self,
+ interface,
+ ipv6: bool = False,
+ ipv4: bool = True,
+ tmp_dir=None,
+ ):
self.interface = interface
self.ipv4 = ipv4
self.ipv6 = ipv6
self.stack = contextlib.ExitStack()
self.state_msg: str = ""
+ self.tmp_dir = tmp_dir
def __enter__(self):
# ipv6 dualstack might succeed when dhcp4 fails
# therefore catch exception unless only v4 is used
try:
if self.ipv4:
- self.stack.enter_context(EphemeralDHCPv4(self.interface))
+ self.stack.enter_context(
+ EphemeralDHCPv4(self.interface, tmp_dir=self.tmp_dir)
+ )
if self.ipv6:
self.stack.enter_context(EphemeralIPv6Network(self.interface))
# v6 link local might be usable
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 7b91077d..32fb031c 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -11,11 +11,10 @@ from cloudinit.net import (
IPV6_DYNAMIC_TYPES,
SYS_CLASS_NET,
get_devicelist,
+ renderer,
subnet_is_ipv6,
)
-
-from . import renderer
-from .network_state import NET_CONFIG_TO_V2, NetworkState
+from cloudinit.net.network_state import NET_CONFIG_TO_V2, NetworkState
KNOWN_SNAPD_CONFIG = b"""\
# This is the initial network config.
diff --git a/cloudinit/net/network_manager.py b/cloudinit/net/network_manager.py
index 8053511c..53763d15 100644
--- a/cloudinit/net/network_manager.py
+++ b/cloudinit/net/network_manager.py
@@ -15,11 +15,9 @@ from typing import Optional
from cloudinit import log as logging
from cloudinit import subp, util
-from cloudinit.net import is_ipv6_address, subnet_is_ipv6
+from cloudinit.net import is_ipv6_address, renderer, subnet_is_ipv6
from cloudinit.net.network_state import NetworkState
-from . import renderer
-
NM_RUN_DIR = "/etc/NetworkManager"
NM_LIB_DIR = "/usr/lib/NetworkManager"
NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index e4f7a7fd..dd2ff489 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -138,7 +138,7 @@ class CommandHandlerMeta(type):
return super(CommandHandlerMeta, cls).__new__(cls, name, parents, dct)
-class NetworkState(object):
+class NetworkState:
def __init__(
self, network_state: dict, version: int = NETWORK_STATE_VERSION
):
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
index e0a5d848..3591513f 100644
--- a/cloudinit/net/networkd.py
+++ b/cloudinit/net/networkd.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# vi: ts=4 expandtab
#
-# Copyright (C) 2021 VMware Inc.
+# Copyright (C) 2021-2022 VMware Inc.
#
# Author: Shreenidhi Shedi <yesshedi@gmail.com>
#
@@ -12,10 +12,9 @@ from typing import Optional
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.net import renderer
from cloudinit.net.network_state import NetworkState
-from . import renderer
-
LOG = logging.getLogger(__name__)
@@ -87,7 +86,7 @@ class Renderer(renderer.Renderer):
"network_conf_dir", "/etc/systemd/network/"
)
- def generate_match_section(self, iface, cfg):
+ def generate_match_section(self, iface, cfg: CfgParser):
sec = "Match"
match_dict = {
"name": "Name",
@@ -104,7 +103,7 @@ class Renderer(renderer.Renderer):
return iface["name"]
- def generate_link_section(self, iface, cfg):
+ def generate_link_section(self, iface, cfg: CfgParser):
sec = "Link"
if not iface:
@@ -113,7 +112,7 @@ class Renderer(renderer.Renderer):
if "mtu" in iface and iface["mtu"]:
cfg.update_section(sec, "MTUBytes", iface["mtu"])
- def parse_routes(self, conf, cfg):
+ def parse_routes(self, conf, cfg: CfgParser):
sec = "Route"
route_cfg_map = {
"gateway": "Gateway",
@@ -133,7 +132,7 @@ class Renderer(renderer.Renderer):
v += prefix
cfg.update_section(sec, route_cfg_map[k], v)
- def parse_subnets(self, iface, cfg):
+ def parse_subnets(self, iface, cfg: CfgParser):
dhcp = "no"
sec = "Network"
for e in iface.get("subnets", []):
@@ -175,8 +174,10 @@ class Renderer(renderer.Renderer):
):
cfg.update_section(sec, "IPv6AcceptRA", iface["accept-ra"])
+ return dhcp
+
# This is to accommodate extra keys present in VMware config
- def dhcp_domain(self, d, cfg):
+ def dhcp_domain(self, d, cfg: CfgParser):
for item in ["dhcp4domain", "dhcp6domain"]:
if item not in d:
continue
@@ -194,7 +195,7 @@ class Renderer(renderer.Renderer):
section = "DHCPv6"
cfg.update_section(section, "UseDomains", ret)
- def parse_dns(self, iface, cfg, ns):
+ def parse_dns(self, iface, cfg: CfgParser, ns: NetworkState):
sec = "Network"
dns_cfg_map = {
@@ -216,6 +217,34 @@ class Renderer(renderer.Renderer):
if k in dns and dns[k]:
cfg.update_section(sec, v, " ".join(dns[k]))
+ def parse_dhcp_overrides(self, cfg: CfgParser, device, dhcp, version):
+ dhcp_config_maps = {
+ "UseDNS": "use-dns",
+ "UseDomains": "use-domains",
+ "UseHostname": "use-hostname",
+ "UseNTP": "use-ntp",
+ }
+
+ if version == "4":
+ dhcp_config_maps.update(
+ {
+ "SendHostname": "send-hostname",
+ "Hostname": "hostname",
+ "RouteMetric": "route-metric",
+ "UseMTU": "use-mtu",
+ "UseRoutes": "use-routes",
+ }
+ )
+
+ if f"dhcp{version}-overrides" in device and dhcp in [
+ "yes",
+ f"ipv{version}",
+ ]:
+ dhcp_overrides = device[f"dhcp{version}-overrides"]
+ for k, v in dhcp_config_maps.items():
+ if v in dhcp_overrides:
+ cfg.update_section(f"DHCPv{version}", k, dhcp_overrides[v])
+
def create_network_file(self, link, conf, nwk_dir):
net_fn_owner = "systemd-network"
@@ -241,14 +270,14 @@ class Renderer(renderer.Renderer):
for k, v in ret_dict.items():
self.create_network_file(k, v, network_dir)
- def _render_content(self, ns):
+ def _render_content(self, ns: NetworkState) -> dict:
ret_dict = {}
for iface in ns.iter_interfaces():
cfg = CfgParser()
link = self.generate_match_section(iface, cfg)
self.generate_link_section(iface, cfg)
- self.parse_subnets(iface, cfg)
+ dhcp = self.parse_subnets(iface, cfg)
self.parse_dns(iface, cfg, ns)
for route in ns.iter_routes():
@@ -271,7 +300,25 @@ class Renderer(renderer.Renderer):
name = dev_name
break
if name in ns.config["ethernets"]:
- self.dhcp_domain(ns.config["ethernets"][name], cfg)
+ device = ns.config["ethernets"][name]
+
+ # dhcp{version}domain are extra keys only present in
+ # VMware config
+ self.dhcp_domain(device, cfg)
+ for version in ["4", "6"]:
+ if (
+ f"dhcp{version}domain" in device
+ and "use-domains"
+ in device.get(f"dhcp{version}-overrides", {})
+ ):
+ exception = (
+ f"{name} has both dhcp{version}domain"
+ f" and dhcp{version}-overrides.use-domains"
+ f" configured. Use one"
+ )
+ raise Exception(exception)
+
+ self.parse_dhcp_overrides(cfg, device, dhcp, version)
ret_dict.update({link: cfg.get_final_conf()})
@@ -287,6 +334,6 @@ def available(target=None):
return True
-def network_state_to_networkd(ns):
+def network_state_to_networkd(ns: NetworkState):
renderer = Renderer({})
return renderer._render_content(ns)
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index d7bc19b1..6bf4703c 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -28,7 +28,7 @@ def filter_by_attr(match_name):
filter_by_physical = filter_by_type("physical")
-class Renderer(object):
+class Renderer:
def __init__(self, config=None):
pass
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index 7edc34b5..fcf7feba 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -2,7 +2,7 @@
from typing import List, Tuple, Type
-from . import (
+from cloudinit.net import (
RendererNotFoundError,
eni,
freebsd,
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index d5789fb0..453c0522 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -13,12 +13,11 @@ from cloudinit.net import (
IPV6_DYNAMIC_TYPES,
is_ipv6_address,
net_prefix_to_ipv4_mask,
+ renderer,
subnet_is_ipv6,
)
from cloudinit.net.network_state import NetworkState
-from . import renderer
-
LOG = logging.getLogger(__name__)
KNOWN_DISTROS = [
"almalinux",
@@ -65,7 +64,7 @@ def _quote_value(value):
return value
-class ConfigMap(object):
+class ConfigMap:
"""Sysconfig like dictionary object."""
# Why does redhat prefer yes/no to true/false??
diff --git a/cloudinit/registry.py b/cloudinit/registry.py
index 5044e760..4deb6752 100644
--- a/cloudinit/registry.py
+++ b/cloudinit/registry.py
@@ -5,7 +5,7 @@
import copy
-class DictRegistry(object):
+class DictRegistry:
"""A simple registry for a mapping of objects."""
def __init__(self):
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index 34c3b875..f87ec3ad 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -13,10 +13,12 @@ import os.path
import time
from typing import List
+from cloudinit.reporting import (
+ available_handlers,
+ instantiated_handler_registry,
+)
from cloudinit.reporting.handlers import ReportingHandler
-from . import available_handlers, instantiated_handler_registry
-
FINISH_EVENT_TYPE = "finish"
START_EVENT_TYPE = "start"
@@ -33,7 +35,7 @@ class _nameset(set):
status = _nameset(("SUCCESS", "WARN", "FAIL"))
-class ReportingEvent(object):
+class ReportingEvent:
"""Encapsulation of event formatting."""
def __init__(
@@ -155,7 +157,7 @@ def report_start_event(event_name, event_description):
return report_event(event)
-class ReportEventStack(object):
+class ReportEventStack:
"""Context Manager for using :py:func:`report_event`
This enables calling :py:func:`report_start_event` and
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index d43b80b0..2c1f4998 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -128,6 +128,7 @@ class WebHookHandler(ReportingHandler):
timeout=args[2],
retries=args[3],
ssl_details=args[4],
+ log_req_resp=False,
)
consecutive_failed = 0
except Exception as e:
@@ -141,10 +142,16 @@ class WebHookHandler(ReportingHandler):
self.queue.task_done()
def publish_event(self, event):
+ event_data = event.as_dict()
+ LOG.debug(
+ "Queuing POST to %s, data: %s",
+ self.endpoint,
+ event_data,
+ )
self.queue.put(
(
self.endpoint,
- json.dumps(event.as_dict()),
+ json.dumps(event_data),
self.timeout,
self.retries,
self.ssl_details,
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 32844e71..8684d003 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -47,6 +47,7 @@ CFG_BUILTIN = {
"RbxCloud",
"UpCloud",
"VMware",
+ "NWCS",
# At the end to act as a 'catch' when none of the above work...
"None",
],
diff --git a/cloudinit/simpletable.py b/cloudinit/simpletable.py
index 90281e06..fc73544f 100644
--- a/cloudinit/simpletable.py
+++ b/cloudinit/simpletable.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class SimpleTable(object):
+class SimpleTable:
"""A minimal implementation of PrettyTable
for distribution with cloud-init.
"""
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 803c4f25..db9234db 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -357,6 +357,9 @@ class DataSourceAzure(sources.DataSource):
self._ephemeral_dhcp_ctx = None
self._iso_dev = None
self._wireserver_endpoint = DEFAULT_WIRESERVER_ENDPOINT
+ self._reported_ready_marker_file = os.path.join(
+ self.paths.cloud_dir, "data", "reported_ready"
+ )
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -399,7 +402,9 @@ class DataSourceAzure(sources.DataSource):
LOG.debug("Requested ephemeral networking (iface=%s)", iface)
self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
- iface=iface, dhcp_log_func=dhcp_log_cb
+ iface=iface,
+ dhcp_log_func=dhcp_log_cb,
+ tmp_dir=self.distro.get_tmp_exec_path(),
)
lease = None
@@ -820,6 +825,11 @@ class DataSourceAzure(sources.DataSource):
)
return {}
+ def get_instance_id(self):
+ if not self.metadata or "instance-id" not in self.metadata:
+ return self._iid()
+ return str(self.metadata["instance-id"])
+
def device_name_to_device(self, name):
return self.ds_cfg["disk_aliases"].get(name)
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 32cc078b..3cdfeeca 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -27,7 +27,7 @@ from cloudinit.sources.helpers import ec2
LOG = logging.getLogger(__name__)
-class CloudStackPasswordServerClient(object):
+class CloudStackPasswordServerClient:
"""
Implements password fetching from the CloudStack password server.
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 32659848..139ec7e4 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -37,12 +37,13 @@ AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + "-ttl-seconds"
AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
-class CloudNames(object):
+class CloudNames:
ALIYUN = "aliyun"
AWS = "aws"
BRIGHTBOX = "brightbox"
ZSTACK = "zstack"
E24CLOUD = "e24cloud"
+ OUTSCALE = "outscale"
# UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false',
# then an attempt at the Ec2 Metadata service will be made.
UNKNOWN = "unknown"
@@ -51,6 +52,11 @@ class CloudNames(object):
NO_EC2_METADATA = "no-ec2-metadata"
+# Drop when LP: #1988157 tag handling is fixed
+def skip_404_tag_errors(exception):
+ return exception.code == 404 and "meta-data/tags/" in exception.url
+
+
class DataSourceEc2(sources.DataSource):
dsname = "Ec2"
@@ -127,7 +133,10 @@ class DataSourceEc2(sources.DataSource):
return False
try:
with EphemeralIPNetwork(
- self.fallback_interface, ipv6=True
+ self.fallback_interface,
+ ipv4=True,
+ ipv6=True,
+ tmp_dir=self.distro.get_tmp_exec_path(),
) as netw:
state_msg = f" {netw.state_msg}" if netw.state_msg else ""
self._crawled_metadata = util.log_time(
@@ -527,8 +536,12 @@ class DataSourceEc2(sources.DataSource):
if self.cloud_name == CloudNames.AWS:
exc_cb = self._refresh_stale_aws_token_cb
exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb
- else:
+ skip_cb = None
+ elif self.cloud_name == CloudNames.OUTSCALE:
exc_cb = exc_cb_ud = None
+ skip_cb = skip_404_tag_errors
+ else:
+ exc_cb = exc_cb_ud = skip_cb = None
try:
crawled_metadata["user-data"] = ec2.get_instance_userdata(
api_version,
@@ -543,6 +556,7 @@ class DataSourceEc2(sources.DataSource):
headers_cb=self._get_headers,
headers_redact=redact,
exception_cb=exc_cb,
+ retrieval_exception_ignore_cb=skip_cb,
)
if self.cloud_name == CloudNames.AWS:
identity = ec2.get_instance_identity(
@@ -667,7 +681,7 @@ class DataSourceEc2Local(DataSourceEc2):
perform_dhcp_setup = True # Use dhcp before querying metadata
def get_data(self):
- supported_platforms = (CloudNames.AWS,)
+ supported_platforms = (CloudNames.AWS, CloudNames.OUTSCALE)
if self.cloud_name not in supported_platforms:
LOG.debug(
"Local Ec2 mode only supported on %s, not %s",
@@ -757,6 +771,14 @@ def identify_e24cloud(data):
return CloudNames.E24CLOUD
+def identify_outscale(data):
+ if (
+ data["product_name"] == "3DS Outscale VM".lower()
+ and data["vendor"] == "3DS Outscale".lower()
+ ):
+ return CloudNames.OUTSCALE
+
+
def identify_platform():
# identify the platform and return an entry in CloudNames.
data = _collect_platform_data()
@@ -765,6 +787,7 @@ def identify_platform():
identify_brightbox,
identify_zstack,
identify_e24cloud,
+ identify_outscale,
lambda x: CloudNames.UNKNOWN,
)
for checker in checks:
@@ -787,6 +810,7 @@ def _collect_platform_data():
serial: dmi 'system-serial-number' (/sys/.../product_serial)
asset_tag: 'dmidecode -s chassis-asset-tag'
vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor)
+    product_name: dmi 'system-product-name' (/sys/.../product_name)
On Ec2 instances experimentation is that product_serial is upper case,
and product_uuid is lower case. This returns lower case values for both.
@@ -818,6 +842,9 @@ def _collect_platform_data():
vendor = dmi.read_dmi_data("system-manufacturer")
data["vendor"] = (vendor if vendor else "").lower()
+ product_name = dmi.read_dmi_data("system-product-name")
+ data["product_name"] = (product_name if product_name else "").lower()
+
return data
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 3691a706..29548a60 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -27,7 +27,7 @@ HOSTKEY_NAMESPACE = "hostkeys"
HEADERS = {"Metadata-Flavor": "Google"}
-class GoogleMetadataFetcher(object):
+class GoogleMetadataFetcher:
def __init__(self, metadata_address, num_retries, sec_between_retries):
self.metadata_address = metadata_address
self.num_retries = num_retries
@@ -83,7 +83,10 @@ class DataSourceGCE(sources.DataSource):
url_params = self.get_url_params()
network_context = noop()
if self.perform_dhcp_setup:
- network_context = EphemeralDHCPv4(self.fallback_interface)
+ network_context = EphemeralDHCPv4(
+ self.fallback_interface,
+ tmp_dir=self.distro.get_tmp_exec_path(),
+ )
with network_context:
ret = util.log_time(
LOG.debug,
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 90531769..c70a23ce 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -61,6 +61,7 @@ class DataSourceHetzner(sources.DataSource):
connectivity_url_data={
"url": BASE_URL_V1 + "/metadata/instance-id",
},
+ tmp_dir=self.distro.get_tmp_exec_path(),
):
md = hc_helper.read_metadata(
self.metadata_address,
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 18c3848f..d4f544a9 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -105,7 +105,7 @@ LOG = logging.getLogger(__name__)
IBM_CONFIG_UUID = "9796-932E"
-class Platforms(object):
+class Platforms:
TEMPLATE_LIVE_METADATA = "Template/Live/Metadata"
TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED."
TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata"
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index 34e4e00e..49a7567c 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -1,4 +1,4 @@
-"""Datasource for LXD, reads /dev/lxd/sock representaton of instance data.
+"""Datasource for LXD, reads /dev/lxd/sock representation of instance data.
Notes:
* This datasource replaces previous NoCloud datasource for LXD.
@@ -6,14 +6,14 @@ Notes:
still be detected on those images.
* Detect LXD datasource when /dev/lxd/sock is an active socket file.
* Info on dev-lxd API: https://linuxcontainers.org/lxd/docs/master/dev-lxd
- * TODO( Hotplug support using websockets API 1.0/events )
"""
import os
import socket
import stat
+from enum import Flag, auto
from json.decoder import JSONDecodeError
-from typing import Any, Dict, Union, cast
+from typing import Any, Dict, List, Optional, Union, cast
import requests
from requests.adapters import HTTPAdapter
@@ -23,12 +23,14 @@ from urllib3.connection import HTTPConnection
from urllib3.connectionpool import HTTPConnectionPool
from cloudinit import log as logging
-from cloudinit import sources, subp, util
+from cloudinit import sources, subp, url_helper, util
+from cloudinit.net import find_fallback_nic
LOG = logging.getLogger(__name__)
LXD_SOCKET_PATH = "/dev/lxd/sock"
LXD_SOCKET_API_VERSION = "1.0"
+LXD_URL = "http://lxd"
# Config key mappings to alias as top-level instance data keys
CONFIG_KEY_ALIASES = {
@@ -41,18 +43,8 @@ CONFIG_KEY_ALIASES = {
}
-def generate_fallback_network_config() -> dict:
- """Return network config V1 dict representing instance network config."""
- network_v1: Dict[str, Any] = {
- "version": 1,
- "config": [
- {
- "type": "physical",
- "name": "eth0",
- "subnets": [{"type": "dhcp", "control": "auto"}],
- }
- ],
- }
+def _get_fallback_interface_name() -> str:
+ default_name = "eth0"
if subp.which("systemd-detect-virt"):
try:
virt_type, _ = subp.subp(["systemd-detect-virt"])
@@ -62,19 +54,57 @@ def generate_fallback_network_config() -> dict:
" Rendering default network config.",
err,
)
- return network_v1
+ return default_name
if virt_type.strip() in (
"kvm",
"qemu",
): # instance.type VIRTUAL-MACHINE
arch = util.system_info()["uname"][4]
if arch == "ppc64le":
- network_v1["config"][0]["name"] = "enp0s5"
+ return "enp0s5"
elif arch == "s390x":
- network_v1["config"][0]["name"] = "enc9"
+ return "enc9"
else:
- network_v1["config"][0]["name"] = "enp5s0"
- return network_v1
+ return "enp5s0"
+ return default_name
+
+
+def generate_network_config(
+ nics: Optional[List[str]] = None,
+) -> Dict[str, Any]:
+ """Return network config V1 dict representing instance network config."""
+ # TODO: The original intent of this function was to use the nics retrieved
+ # from LXD's devices endpoint to determine the primary nic and write
+ # that out to network config. However, for LXD VMs, the device name
+ # may differ from the interface name in the VM, so we'll instead rely
+ # on our fallback nic code. Once LXD's devices endpoint grows the
+ # ability to provide a MAC address, we should rely on that information
+ # rather than just the glorified guessing that we're doing here.
+ primary_nic = find_fallback_nic()
+ if primary_nic:
+ LOG.debug(
+ "LXD datasource generating network from discovered active"
+ " device: %s",
+ primary_nic,
+ )
+ else:
+ primary_nic = _get_fallback_interface_name()
+ LOG.debug(
+ "LXD datasource generating network from systemd-detect-virt"
+ " platform default device: %s",
+ primary_nic,
+ )
+
+ return {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": primary_nic,
+ "subnets": [{"type": "dhcp", "control": "auto"}],
+ }
+ ],
+ }
class SocketHTTPConnection(HTTPConnection):
@@ -144,6 +174,12 @@ class DataSourceLXD(sources.DataSource):
"user.user-data",
)
+ skip_hotplug_detect = True
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ super()._unpickle(ci_pkl_version)
+ self.skip_hotplug_detect = True
+
def _is_platform_viable(self) -> bool:
"""Check platform environment to report if this datasource may run."""
return is_platform_viable()
@@ -190,7 +226,7 @@ class DataSourceLXD(sources.DataSource):
def check_instance_id(self, sys_cfg) -> str:
"""Return True if instance_id unchanged."""
- response = read_metadata(metadata_only=True)
+ response = read_metadata(metadata_keys=MetaDataKeys.META_DATA)
md = response.get("meta-data", {})
if not isinstance(md, dict):
md = util.load_yaml(md)
@@ -205,14 +241,28 @@ class DataSourceLXD(sources.DataSource):
if self._network_config == sources.UNSET:
if self._crawled_metadata == sources.UNSET:
self._get_data()
- if isinstance(
- self._crawled_metadata, dict
- ) and self._crawled_metadata.get("network-config"):
- self._network_config = self._crawled_metadata.get(
- "network-config", {}
- )
- else:
- self._network_config = generate_fallback_network_config()
+ if isinstance(self._crawled_metadata, dict):
+ if self._crawled_metadata.get("network-config"):
+ LOG.debug("LXD datasource using provided network config")
+ self._network_config = self._crawled_metadata[
+ "network-config"
+ ]
+ elif self._crawled_metadata.get("devices"):
+ # If no explicit network config, but we have net devices
+ # available to us, find the primary and set it up.
+ devices: List[str] = [
+ k
+ for k, v in self._crawled_metadata["devices"].items()
+ if v["type"] == "nic"
+ ]
+ self._network_config = generate_network_config(devices)
+ if self._network_config == sources.UNSET:
+ # We know nothing about network, so setup fallback
+ LOG.debug(
+ "LXD datasource generating network config using fallback."
+ )
+ self._network_config = generate_network_config()
+
return cast(dict, self._network_config)
@@ -223,14 +273,132 @@ def is_platform_viable() -> bool:
return False
+def _get_json_response(
+ session: requests.Session, url: str, do_raise: bool = True
+):
+ url_response = _do_request(session, url, do_raise)
+ try:
+ return url_response.json()
+ except JSONDecodeError as exc:
+ raise sources.InvalidMetaDataException(
+ "Unable to process LXD config at {url}."
+ " Expected JSON but found: {resp}".format(
+ url=url, resp=url_response.text
+ )
+ ) from exc
+
+
+def _do_request(
+ session: requests.Session, url: str, do_raise: bool = True
+) -> requests.Response:
+ response = session.get(url)
+ LOG.debug("[GET] [HTTP:%d] %s", response.status_code, url)
+ if do_raise and not response.ok:
+ raise sources.InvalidMetaDataException(
+ "Invalid HTTP response [{code}] from {route}: {resp}".format(
+ code=response.status_code,
+ route=url,
+ resp=response.text,
+ )
+ )
+ return response
+
+
+class MetaDataKeys(Flag):
+ NONE = auto()
+ CONFIG = auto()
+ DEVICES = auto()
+ META_DATA = auto()
+ ALL = CONFIG | DEVICES | META_DATA
+
+
+class _MetaDataReader:
+ def __init__(self, api_version: str = LXD_SOCKET_API_VERSION):
+ self.api_version = api_version
+ self._version_url = url_helper.combine_url(LXD_URL, self.api_version)
+
+ def _process_config(self, session: requests.Session) -> dict:
+ """Iterate on LXD API config items. Promoting CONFIG_KEY_ALIASES
+
+ Any CONFIG_KEY_ALIASES which affect cloud-init behavior are promoted
+ as top-level configuration keys: user-data, network-data, vendor-data.
+
+ LXD's cloud-init.* config keys override any user.* config keys.
+ Log debug messages if any user.* keys are overridden by the related
+ cloud-init.* key.
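+
+        e.g. if both ``cloud-init.user-data`` and ``user.user-data`` are
+        present, the promoted top-level ``user-data`` key comes from
+        ``cloud-init.user-data``.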
+ """
+ config: dict = {"config": {}}
+ config_url = url_helper.combine_url(self._version_url, "config")
+        # Represent all advertised/available config routes under
+        # the dict path {LXD_SOCKET_API_VERSION: {config: {...}}}.
+ config_routes = _get_json_response(session, config_url)
+
+ # Sorting keys to ensure we always process in alphabetical order.
+ # cloud-init.* keys will sort before user.* keys which is preferred
+ # precedence.
+ for config_route in sorted(config_routes):
+ config_route_url = url_helper.combine_url(LXD_URL, config_route)
+ config_route_response = _do_request(
+ session, config_route_url, do_raise=False
+ )
+ if not config_route_response.ok:
+ LOG.debug(
+ "Skipping %s on [HTTP:%d]:%s",
+ config_route_url,
+ config_route_response.status_code,
+ config_route_response.text,
+ )
+ continue
+
+ cfg_key = config_route.rpartition("/")[-1]
+ # Leave raw data values/format unchanged to represent it in
+ # instance-data.json for cloud-init query or jinja template
+ # use.
+ config["config"][cfg_key] = config_route_response.text
+ # Promote common CONFIG_KEY_ALIASES to top-level keys.
+ if cfg_key in CONFIG_KEY_ALIASES:
+ # Due to sort of config_routes, promote cloud-init.*
+ # aliases before user.*. This allows user.* keys to act as
+ # fallback config on old LXD, with new cloud-init images.
+ if CONFIG_KEY_ALIASES[cfg_key] not in config:
+ config[
+ CONFIG_KEY_ALIASES[cfg_key]
+ ] = config_route_response.text
+ else:
+ LOG.warning(
+ "Ignoring LXD config %s in favor of %s value.",
+ cfg_key,
+ cfg_key.replace("user", "cloud-init", 1),
+ )
+ return config
+
+ def __call__(self, *, metadata_keys: MetaDataKeys) -> dict:
+ with requests.Session() as session:
+ session.mount(self._version_url, LXDSocketAdapter())
+ # Document API version read
+ md: dict = {"_metadata_api_version": self.api_version}
+ if MetaDataKeys.META_DATA in metadata_keys:
+ md_route = url_helper.combine_url(
+ self._version_url, "meta-data"
+ )
+ md["meta-data"] = _do_request(session, md_route).text
+ if MetaDataKeys.CONFIG in metadata_keys:
+ md.update(self._process_config(session))
+ if MetaDataKeys.DEVICES in metadata_keys:
+ url = url_helper.combine_url(self._version_url, "devices")
+ md["devices"] = _get_json_response(session, url)
+ return md
+
+
def read_metadata(
- api_version: str = LXD_SOCKET_API_VERSION, metadata_only: bool = False
+ api_version: str = LXD_SOCKET_API_VERSION,
+ metadata_keys: MetaDataKeys = MetaDataKeys.ALL,
) -> dict:
"""Fetch metadata from the /dev/lxd/socket routes.
Perform a number of HTTP GETs on known routes on the devlxd socket API.
- Minimally all containers must respond to http://lxd/1.0/meta-data when
- the LXD configuration setting `security.devlxd` is true.
+ Minimally all containers must respond to <LXD_SOCKET_API_VERSION>/meta-data
+ when the LXD configuration setting `security.devlxd` is true.
When `security.devlxd` is false, no /dev/lxd/socket file exists. This
datasource will return False from `is_platform_viable` in that case.
@@ -245,99 +413,21 @@ def read_metadata(
- user.vendor-data -> vendor-data
- user.network-config -> network-config
+ :param api_version:
+        LXD API version to operate with.
+ :param metadata_keys:
+ Instance of `MetaDataKeys` indicating what keys to fetch.
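+        Members combine with ``|``, e.g.
+        ``MetaDataKeys.META_DATA | MetaDataKeys.CONFIG``.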
:return:
- A dict with the following mandatory key: meta-data.
- Optional keys: user-data, vendor-data, network-config, network_mode
+ A dict with the following optional keys: meta-data, user-data,
+ vendor-data, network-config, network_mode, devices.
Below <LXD_SOCKET_API_VERSION> is a dict representation of all raw
configuration keys and values provided to the container surfaced by
the socket under the /1.0/config/ route.
"""
- md: dict = {}
- lxd_url = "http://lxd"
- version_url = lxd_url + "/" + api_version + "/"
- with requests.Session() as session:
- session.mount(version_url, LXDSocketAdapter())
- # Raw meta-data as text
- md_route = "{route}meta-data".format(route=version_url)
- response = session.get(md_route)
- LOG.debug("[GET] [HTTP:%d] %s", response.status_code, md_route)
- if not response.ok:
- raise sources.InvalidMetaDataException(
- "Invalid HTTP response [{code}] from {route}: {resp}".format(
- code=response.status_code,
- route=md_route,
- resp=response.text,
- )
- )
-
- md["meta-data"] = response.text
- if metadata_only:
- return md # Skip network-data, vendor-data, user-data
-
- md = {
- "_metadata_api_version": api_version, # Document API version read
- "config": {},
- "meta-data": md["meta-data"],
- }
-
- config_url = version_url + "config"
- # Represent all advertized/available config routes under
- # the dict path {LXD_SOCKET_API_VERSION: {config: {...}}.
- response = session.get(config_url)
- LOG.debug("[GET] [HTTP:%d] %s", response.status_code, config_url)
- if not response.ok:
- raise sources.InvalidMetaDataException(
- "Invalid HTTP response [{code}] from {route}: {resp}".format(
- code=response.status_code,
- route=config_url,
- resp=response.text,
- )
- )
- try:
- config_routes = response.json()
- except JSONDecodeError as exc:
- raise sources.InvalidMetaDataException(
- "Unable to determine cloud-init config from {route}."
- " Expected JSON but found: {resp}".format(
- route=config_url, resp=response.text
- )
- ) from exc
-
- # Sorting keys to ensure we always process in alphabetical order.
- # cloud-init.* keys will sort before user.* keys which is preferred
- # precedence.
- for config_route in sorted(config_routes):
- url = "http://lxd{route}".format(route=config_route)
- response = session.get(url)
- LOG.debug("[GET] [HTTP:%d] %s", response.status_code, url)
- if response.ok:
- cfg_key = config_route.rpartition("/")[-1]
- # Leave raw data values/format unchanged to represent it in
- # instance-data.json for cloud-init query or jinja template
- # use.
- md["config"][cfg_key] = response.text
- # Promote common CONFIG_KEY_ALIASES to top-level keys.
- if cfg_key in CONFIG_KEY_ALIASES:
- # Due to sort of config_routes, promote cloud-init.*
- # aliases before user.*. This allows user.* keys to act as
- # fallback config on old LXD, with new cloud-init images.
- if CONFIG_KEY_ALIASES[cfg_key] not in md:
- md[CONFIG_KEY_ALIASES[cfg_key]] = response.text
- else:
- LOG.warning(
- "Ignoring LXD config %s in favor of %s value.",
- cfg_key,
- cfg_key.replace("user", "cloud-init", 1),
- )
- else:
- LOG.debug(
- "Skipping %s on [HTTP:%d]:%s",
- url,
- response.status_code,
- response.text,
- )
- return md
+ return _MetaDataReader(api_version=api_version)(
+ metadata_keys=metadata_keys
+ )
# Used to match classes to dependencies
@@ -357,5 +447,6 @@ if __name__ == "__main__":
description = """Query LXD metadata and emit a JSON object."""
parser = argparse.ArgumentParser(description=description)
parser.parse_args()
- print(util.json_dumps(read_metadata()))
+ print(util.json_dumps(read_metadata(metadata_keys=MetaDataKeys.ALL)))
+
# vi: ts=4 expandtab
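
The refactor above moves the per-route fetching into _MetaDataReader and replaces the old metadata_only flag with MetaDataKeys, which reads like a Flag enum that callers can combine (e.g. MetaDataKeys.META_DATA | MetaDataKeys.CONFIG). A minimal standalone sketch of the alias-promotion rule the reader relies on, using a toy subset of CONFIG_KEY_ALIASES (the real mapping also covers vendor-data and network-config):

    # Because "cloud-init.*" sorts before "user.*", sorted() iteration
    # promotes the cloud-init value first; later user.* values only fill
    # aliases that are still unset.
    CONFIG_KEY_ALIASES = {
        "cloud-init.user-data": "user-data",
        "user.user-data": "user-data",
    }
    fetched = {
        "user.user-data": "#cloud-config\n{}",
        "cloud-init.user-data": "#cloud-config\nhostname: demo",
    }
    promoted = {}
    for key in sorted(fetched):
        alias = CONFIG_KEY_ALIASES[key]
        if alias not in promoted:  # first writer (cloud-init.*) wins
            promoted[alias] = fetched[key]
    assert promoted["user-data"] == "#cloud-config\nhostname: demo"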
diff --git a/cloudinit/sources/DataSourceNWCS.py b/cloudinit/sources/DataSourceNWCS.py
new file mode 100644
index 00000000..e21383d2
--- /dev/null
+++ b/cloudinit/sources/DataSourceNWCS.py
@@ -0,0 +1,168 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from requests import exceptions
+
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import net, sources, subp, url_helper, util
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.net.ephemeral import EphemeralDHCPv4
+
+LOG = logging.getLogger(__name__)
+
+BASE_URL_V1 = "http://169.254.169.254/api/v1"
+
+BUILTIN_DS_CONFIG = {
+ "metadata_url": BASE_URL_V1 + "/metadata",
+}
+
+MD_RETRIES = 30
+MD_TIMEOUT = 5
+MD_WAIT_RETRY = 5
+
+
+class DataSourceNWCS(sources.DataSource):
+
+ dsname = "NWCS"
+
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.distro = distro
+ self.metadata = dict()
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "NWCS"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
+ self._network_config = sources.UNSET
+ self.dsmode = sources.DSMODE_NETWORK
+
+ def _get_data(self):
+ LOG.info("Detecting if machine is a NWCS instance")
+ on_nwcs = get_nwcs_data()
+
+ if not on_nwcs:
+ LOG.info("Machine is not a NWCS instance")
+ return False
+
+ LOG.info("Machine is a NWCS instance")
+
+ md = self.get_metadata()
+
+ if md is None:
+ raise Exception("failed to get metadata")
+
+ self.metadata_full = md
+
+ self.metadata["instance-id"] = md["instance-id"]
+ self.metadata["public-keys"] = md["public-keys"]
+ self.metadata["network"] = md["network"]
+ self.metadata["local-hostname"] = md["hostname"]
+
+ self.userdata_raw = md.get("userdata", None)
+
+ self.vendordata_raw = md.get("vendordata", None)
+
+ return True
+
+ def get_metadata(self):
+ try:
+ LOG.info("Attempting to get metadata via DHCP")
+
+ with EphemeralDHCPv4(
+ iface=net.find_fallback_nic(),
+ connectivity_url_data={
+ "url": BASE_URL_V1 + "/metadata/instance-id",
+ },
+ ):
+ return read_metadata(
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+
+ except (
+ NoDHCPLeaseError,
+ subp.ProcessExecutionError,
+ RuntimeError,
+ exceptions.RequestException,
+ ) as e:
+ LOG.error("DHCP failure: %s", e)
+ raise
+
+ @property
+ def network_config(self):
+ LOG.debug("Attempting network configuration")
+
+ if self._network_config is None:
+ LOG.warning(
+ "Found None as cached _network_config, resetting to %s",
+ sources.UNSET,
+ )
+ self._network_config = sources.UNSET
+
+ if self._network_config != sources.UNSET:
+ return self._network_config
+
+ if not self.metadata["network"]["config"]:
+ raise Exception("Unable to get metadata from server")
+
+ # metadata sends interface names, but we don't want to use them
+ for i in self.metadata["network"]["config"]:
+ iface_name = get_interface_name(i["mac_address"])
+
+ if iface_name:
+ LOG.info("Overriding %s with %s", i["name"], iface_name)
+ i["name"] = iface_name
+
+ self._network_config = self.metadata["network"]
+
+ return self._network_config
+
+
+def get_nwcs_data():
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+
+ if vendor_name != "NWCS":
+ return False
+
+ return True
+
+
+def get_interface_name(mac):
+ macs_to_nic = net.get_interfaces_by_mac()
+
+ if mac not in macs_to_nic:
+ return None
+
+ return macs_to_nic.get(mac)
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+def read_metadata(url, timeout=2, sec_between=2, retries=30):
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
+
+ if not response.ok():
+ raise RuntimeError("unable to read metadata at %s" % url)
+
+ return util.load_json(response.contents.decode())
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceNWCS, (sources.DEP_FILESYSTEM,)),
+]
+
+# vi: ts=4 expandtab
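
read_metadata() above is a thin wrapper over url_helper.readurl that decodes the JSON body. A hypothetical standalone equivalent using plain requests, for illustration only (the link-local endpoint answers only from inside an NWCS instance, behind the ephemeral DHCP lease set up in get_metadata):

    import json
    import requests

    def fetch_nwcs_metadata(
        url="http://169.254.169.254/api/v1/metadata", timeout=5
    ):
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()  # stands in for the RuntimeError above
        return json.loads(resp.text)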
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index e46f920d..a9744fa1 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -135,7 +135,7 @@ class BrokenContextDiskDir(Exception):
pass
-class OpenNebulaNetwork(object):
+class OpenNebulaNetwork:
def __init__(self, context, distro, system_nics_by_mac=None):
self.context = context
if system_nics_by_mac is None:
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 292e0efc..915ed0c0 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -32,8 +32,13 @@ DMI_ASSET_TAG_OPENTELEKOM = "OpenTelekomCloud"
# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
# -> compute.defaults.vmware.smbios_asset_tag for this value
DMI_ASSET_TAG_SAPCCLOUD = "SAP CCloud VM"
+DMI_ASSET_TAG_HUAWEICLOUD = "HUAWEICLOUD"
VALID_DMI_ASSET_TAGS = VALID_DMI_PRODUCT_NAMES
-VALID_DMI_ASSET_TAGS += [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
+VALID_DMI_ASSET_TAGS += [
+ DMI_ASSET_TAG_HUAWEICLOUD,
+ DMI_ASSET_TAG_OPENTELEKOM,
+ DMI_ASSET_TAG_SAPCCLOUD,
+]
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
@@ -150,7 +155,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
if self.perform_dhcp_setup: # Setup networking in init-local stage.
try:
- with EphemeralDHCPv4(self.fallback_interface):
+ with EphemeralDHCPv4(
+ self.fallback_interface,
+ tmp_dir=self.distro.get_tmp_exec_path(),
+ ):
results = util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index bf7c0c3a..08daa4f6 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -14,6 +14,7 @@ Notes:
"""
import base64
+import ipaddress
from collections import namedtuple
from typing import Optional, Tuple
@@ -155,6 +156,7 @@ class DataSourceOracle(sources.DataSource):
"url": METADATA_PATTERN.format(version=2, path="instance"),
"headers": V2_HEADERS,
},
+ tmp_dir=self.distro.get_tmp_exec_path(),
)
fetch_primary_nic = not self._is_iscsi_root()
fetch_secondary_nics = self.ds_cfg.get(
@@ -286,7 +288,8 @@ class DataSourceOracle(sources.DataSource):
vnics_data = self._vnics_data if set_primary else self._vnics_data[1:]
- for vnic_dict in vnics_data:
+ for index, vnic_dict in enumerate(vnics_data):
+ is_primary = set_primary and index == 0
mac_address = vnic_dict["macAddr"].lower()
if mac_address not in interfaces_by_mac:
LOG.warning(
@@ -295,29 +298,40 @@ class DataSourceOracle(sources.DataSource):
)
continue
name = interfaces_by_mac[mac_address]
+ network = ipaddress.ip_network(vnic_dict["subnetCidrBlock"])
if self._network_config["version"] == 1:
- subnet = {
- "type": "static",
- "address": vnic_dict["privateIp"],
- }
- self._network_config["config"].append(
- {
- "name": name,
- "type": "physical",
- "mac_address": mac_address,
- "mtu": MTU,
- "subnets": [subnet],
+ if is_primary:
+ subnet = {"type": "dhcp"}
+ else:
+ subnet = {
+ "type": "static",
+ "address": (
+ f"{vnic_dict['privateIp']}/{network.prefixlen}"
+ ),
}
- )
+ interface_config = {
+ "name": name,
+ "type": "physical",
+ "mac_address": mac_address,
+ "mtu": MTU,
+ "subnets": [subnet],
+ }
+ self._network_config["config"].append(interface_config)
elif self._network_config["version"] == 2:
- self._network_config["ethernets"][name] = {
- "addresses": [vnic_dict["privateIp"]],
+ # Why does this elif exist???
+ # Are there plans to switch to v2?
+ interface_config = {
"mtu": MTU,
- "dhcp4": False,
- "dhcp6": False,
"match": {"macaddress": mac_address},
+ "dhcp6": False,
+ "dhcp4": is_primary,
}
+ if not is_primary:
+ interface_config["addresses"] = [
+ f"{vnic_dict['privateIp']}/{network.prefixlen}"
+ ]
+ self._network_config["ethernets"][name] = interface_config
def _read_system_uuid() -> Optional[str]:
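
The substantive change above is that secondary VNICs now get a proper prefix length instead of a bare address: the privateIp is combined with the subnetCidrBlock via the stdlib ipaddress module, while the primary NIC (index 0, when set_primary is true) falls back to DHCP. A worked example with toy VNIC values:

    import ipaddress

    vnic_dict = {"privateIp": "10.0.0.4", "subnetCidrBlock": "10.0.0.0/24"}
    network = ipaddress.ip_network(vnic_dict["subnetCidrBlock"])
    address = f"{vnic_dict['privateIp']}/{network.prefixlen}"
    assert address == "10.0.0.4/24"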
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 0ba0dec3..6983f275 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -210,7 +210,10 @@ class DataSourceScaleway(sources.DataSource):
if self._fallback_interface is None:
self._fallback_interface = net.find_fallback_nic()
try:
- with EphemeralDHCPv4(self._fallback_interface):
+ with EphemeralDHCPv4(
+ self._fallback_interface,
+ tmp_dir=self.distro.get_tmp_exec_path(),
+ ):
util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 11168f6a..266daf68 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -370,7 +370,7 @@ class JoyentMetadataTimeoutException(JoyentMetadataFetchException):
pass
-class JoyentMetadataClient(object):
+class JoyentMetadataClient:
"""
A client implementing v2 of the Joyent Metadata Protocol Specification.
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
index d6b74bc1..f114dad4 100644
--- a/cloudinit/sources/DataSourceUpCloud.py
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -71,7 +71,9 @@ class DataSourceUpCloud(sources.DataSource):
LOG.debug("Finding a fallback NIC")
nic = cloudnet.find_fallback_nic()
LOG.debug("Discovering metadata via DHCP interface %s", nic)
- with EphemeralDHCPv4(nic):
+ with EphemeralDHCPv4(
+ nic, tmp_dir=self.distro.get_tmp_exec_path()
+ ):
md = util.log_time(
logfunc=LOG.debug,
msg="Reading from metadata service",
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index 8c2e82c2..93b04829 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -99,6 +99,7 @@ class DataSourceVultr(sources.DataSource):
self.ds_cfg["retries"],
self.ds_cfg["wait"],
self.ds_cfg["user-agent"],
+ tmp_dir=self.distro.get_tmp_exec_path(),
)
# Compare subid as instance id
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index c399beb6..85e094ac 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -26,6 +26,7 @@ from cloudinit.atomic_helper import write_json
from cloudinit.distros import Distro
from cloudinit.event import EventScope, EventType
from cloudinit.filters import launch_index
+from cloudinit.helpers import Paths
from cloudinit.persistence import CloudInitPickleMixin
from cloudinit.reporting import events
@@ -46,11 +47,6 @@ EXPERIMENTAL_TEXT = (
)
-# File in which public available instance meta-data is written
-# security-sensitive key values are redacted from this world-readable file
-INSTANCE_JSON_FILE = "instance-data.json"
-# security-sensitive key values are present in this root-readable file
-INSTANCE_JSON_SENSITIVE_FILE = "instance-data-sensitive.json"
REDACT_SENSITIVE_VALUE = "redacted for non-root user"
# Key which can be provide a cloud's official product name to cloud-init
@@ -255,9 +251,13 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
"security-credentials",
)
+ # True on datasources that may not see hotplugged devices reflected
+ # in the updated metadata
+ skip_hotplug_detect = False
+
_ci_pkl_version = 1
- def __init__(self, sys_cfg, distro: Distro, paths, ud_proc=None):
+ def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
self.paths = paths
@@ -286,6 +286,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
self.vendordata2 = None
if not hasattr(self, "vendordata2_raw"):
self.vendordata2_raw = None
+ if not hasattr(self, "skip_hotplug_detect"):
+ self.skip_hotplug_detect = False
if hasattr(self, "userdata") and self.userdata is not None:
# If userdata stores MIME data, on < python3.6 it will be
# missing the 'policy' attribute that exists on >=python3.6.
@@ -429,9 +431,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
except UnicodeDecodeError as e:
LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
- json_sensitive_file = os.path.join(
- self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
- )
+ json_sensitive_file = self.paths.get_runpath("instance_data_sensitive")
cloud_id = instance_data["v1"].get("cloud_id", "none")
cloud_id_file = os.path.join(self.paths.run_dir, "cloud-id")
util.write_file(f"{cloud_id_file}-{cloud_id}", f"{cloud_id}\n")
@@ -443,7 +443,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if prev_cloud_id_file != cloud_id_file:
util.del_file(prev_cloud_id_file)
write_json(json_sensitive_file, processed_data, mode=0o600)
- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ json_file = self.paths.get_runpath("instance_data")
# World readable
write_json(json_file, redact_sensitive_keys(processed_data))
return True
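
Two things happen in sources/__init__.py: the instance-data filenames move behind Paths.get_runpath(), and datasources gain a skip_hotplug_detect class attribute (re-seeded during unpickling so upgraded instances get the default). A hypothetical datasource opting out of hotplug detection might look like this sketch; the class name and body are illustrative only:

    from cloudinit import sources

    class DataSourceExampleCloud(sources.DataSource):
        dsname = "ExampleCloud"
        # Platform metadata never reflects hotplugged NICs, so tell the
        # hotplug hook not to bother re-crawling metadata.
        skip_hotplug_detect = True

        def _get_data(self):
            return False  # sketch only; a real source crawls its metadata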
diff --git a/cloudinit/sources/helpers/cloudsigma.py b/cloudinit/sources/helpers/cloudsigma.py
index 19fa1669..5d39946f 100644
--- a/cloudinit/sources/helpers/cloudsigma.py
+++ b/cloudinit/sources/helpers/cloudsigma.py
@@ -33,7 +33,7 @@ if platform.system() == "Windows":
SERIAL_PORT = "COM2"
-class Cepko(object):
+class Cepko:
"""
One instance of this object can be used for one or more
queries to the serial port.
@@ -58,7 +58,7 @@ class Cepko(object):
return self.get(key, request_pattern)
-class CepkoResult(object):
+class CepkoResult:
"""
CepkoResult executes the request to the virtual serial port as soon
as the instance is initialized and stores the result in both raw and
diff --git a/cloudinit/sources/helpers/ec2.py b/cloudinit/sources/helpers/ec2.py
index d4019557..c792f6e8 100644
--- a/cloudinit/sources/helpers/ec2.py
+++ b/cloudinit/sources/helpers/ec2.py
@@ -16,7 +16,7 @@ LOG = logging.getLogger(__name__)
SKIP_USERDATA_CODES = frozenset([url_helper.NOT_FOUND])
-class MetadataLeafDecoder(object):
+class MetadataLeafDecoder:
"""Decodes a leaf blob into something meaningful."""
def _maybe_json_object(self, text):
@@ -51,7 +51,7 @@ class MetadataLeafDecoder(object):
# See: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/
# ec2-instance-metadata.html
-class MetadataMaterializer(object):
+class MetadataMaterializer:
def __init__(self, blob, base_url, caller, leaf_decoder=None):
self._blob = blob
self._md = None
@@ -190,6 +190,7 @@ def _get_instance_metadata(
headers_cb=None,
headers_redact=None,
exception_cb=None,
+ retrieval_exception_ignore_cb=None,
):
md_url = url_helper.combine_url(metadata_address, api_version, tree)
caller = functools.partial(
@@ -203,7 +204,17 @@ def _get_instance_metadata(
)
def mcaller(url):
- return caller(url).contents
+ try:
+ return caller(url).contents
+ except url_helper.UrlError as e:
+ if (
+ not retrieval_exception_ignore_cb
+ or not retrieval_exception_ignore_cb(e)
+ ):
+ raise
+ else:
+ LOG.warning("Skipped retrieval of the content of %s", url)
+ return "(skipped)"
try:
response = caller(md_url)
@@ -229,6 +240,7 @@ def get_instance_metadata(
headers_cb=None,
headers_redact=None,
exception_cb=None,
+ retrieval_exception_ignore_cb=None,
):
# Note, 'meta-data' explicitly has trailing /.
# this is required for CloudStack (LP: #1356855)
@@ -243,6 +255,7 @@ def get_instance_metadata(
headers_redact=headers_redact,
headers_cb=headers_cb,
exception_cb=exception_cb,
+ retrieval_exception_ignore_cb=retrieval_exception_ignore_cb,
)
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 1f48dac6..cd787d0d 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -82,7 +82,7 @@ class NonReadable(IOError):
pass
-class SourceMixin(object):
+class SourceMixin:
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
return None
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
index a5c67bb7..38ed4f30 100644
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class BootProtoEnum(object):
+class BootProtoEnum:
"""Specifies the NIC Boot Settings."""
DHCP = "dhcp"
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 39dacee0..8b2deb65 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -6,10 +6,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from .nic import Nic
+from cloudinit.sources.helpers.vmware.imc.nic import Nic
-class Config(object):
+class Config:
"""
Stores the Contents specified in the Customization
Specification file.
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 8240ea8f..73334c47 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -18,7 +18,7 @@ class CustomScriptNotFound(Exception):
pass
-class CustomScriptConstant(object):
+class CustomScriptConstant:
CUSTOM_TMP_DIR = "/root/.customization"
# The user defined custom script
@@ -29,7 +29,7 @@ class CustomScriptConstant(object):
POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh"
-class RunCustomScript(object):
+class RunCustomScript:
def __init__(self, scriptname, directory):
self.scriptname = scriptname
self.directory = directory
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 4def10f1..37185cba 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -8,7 +8,7 @@
import configparser
import logging
-from .config_source import ConfigSource
+from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
logger = logging.getLogger(__name__)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 3b3b2d5a..d44f4c01 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -5,7 +5,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from .config_source import ConfigSource
+from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
class ConfigNamespace(ConfigSource):
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 6c135f48..7b9e0974 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -31,7 +31,7 @@ def gen_subnet(ip, netmask):
return ".".join([str(x) for x in result])
-class NicConfigurator(object):
+class NicConfigurator:
def __init__(self, nics, use_system_devices=True):
"""
Initialize the Nic Configurator
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index 4d3967a1..2cc5fb12 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -14,7 +14,7 @@ from cloudinit import subp, util
LOG = logging.getLogger(__name__)
-class PasswordConfigurator(object):
+class PasswordConfigurator:
"""
Class for changing configurations related to passwords in a VM. Includes
setting and expiring passwords.
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index e99f9b43..ef0d0069 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class ConfigSource(object):
+class ConfigSource:
"""Specifies a source for the Config Content."""
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index eda84cfb..05008146 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class GuestCustErrorEnum(object):
+class GuestCustErrorEnum:
"""Specifies different errors of Guest Customization engine"""
GUESTCUST_ERROR_SUCCESS = 0
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
index 33169a7e..b604b7ec 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class GuestCustEventEnum(object):
+class GuestCustEventEnum:
"""Specifies different types of Guest Customization Events"""
GUESTCUST_EVENT_CUSTOMIZE_FAILED = 100
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
index c74fbc8b..e3141439 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class GuestCustStateEnum(object):
+class GuestCustStateEnum:
"""Specifies different states of Guest Customization engine"""
GUESTCUST_STATE_RUNNING = 4
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index f7d676e8..5b5f02ca 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -11,9 +11,12 @@ import re
import time
from cloudinit import subp
-
-from .guestcust_event import GuestCustEventEnum
-from .guestcust_state import GuestCustStateEnum
+from cloudinit.sources.helpers.vmware.imc.guestcust_event import (
+ GuestCustEventEnum,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_state import (
+ GuestCustStateEnum,
+)
logger = logging.getLogger(__name__)
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
index 673204a0..f290a36f 100644
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class Ipv4ModeEnum(object):
+class Ipv4ModeEnum:
"""
The IPv4 configuration mode which directly represents the user's goal.
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index 7b742d0f..ff161933 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -5,8 +5,12 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from .boot_proto import BootProtoEnum
-from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base
+from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
+from cloudinit.sources.helpers.vmware.imc.nic_base import (
+ NicBase,
+ StaticIpv4Base,
+ StaticIpv6Base,
+)
class Nic(NicBase):
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
index 37d9602f..522372a9 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-class NicBase(object):
+class NicBase:
"""
Define what is expected of each nic.
The following properties should be provided in an implementation class.
@@ -81,7 +81,7 @@ class NicBase(object):
raise NotImplementedError("Check constraints on properties")
-class StaticIpv4Base(object):
+class StaticIpv4Base:
"""
Define what is expected of a static IPv4 setting
The following properties should be provided in an implementation class.
@@ -112,7 +112,7 @@ class StaticIpv4Base(object):
raise NotImplementedError("Ipv4 GATEWAY")
-class StaticIpv6Base(object):
+class StaticIpv6Base:
"""Define what are expected of a static IPv6 setting
The following properties should be provided in an implementation class.
"""
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index adbcfbe5..de2d0eb0 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -18,7 +18,7 @@ LOG = log.getLogger(__name__)
@lru_cache()
-def get_metadata(url, timeout, retries, sec_between, agent):
+def get_metadata(url, timeout, retries, sec_between, agent, tmp_dir=None):
# Bring up interface (and try until one works)
exception = RuntimeError("Failed to DHCP")
@@ -26,7 +26,9 @@ def get_metadata(url, timeout, retries, sec_between, agent):
for iface in get_interface_list():
try:
with EphemeralDHCPv4(
- iface=iface, connectivity_url_data={"url": url}
+ iface=iface,
+ connectivity_url_data={"url": url},
+ tmp_dir=tmp_dir,
):
# Check for the metadata route, skip if not there
if not check_route(url):
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 5bbbc724..a1a6964f 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -66,7 +66,7 @@ DISABLE_USER_OPTS = (
)
-class AuthKeyLine(object):
+class AuthKeyLine:
def __init__(
self, source, keytype=None, base64=None, comment=None, options=None
):
@@ -95,7 +95,7 @@ class AuthKeyLine(object):
return " ".join(toks)
-class AuthKeyLineParser(object):
+class AuthKeyLineParser:
"""
AUTHORIZED_KEYS FILE FORMAT
AuthorizedKeysFile specifies the file containing public keys for public
@@ -476,7 +476,7 @@ def setup_user_keys(keys, username, options=None):
util.write_file(auth_key_fn, content, preserve_mode=True)
-class SshdConfigLine(object):
+class SshdConfigLine:
def __init__(self, line, k=None, v=None):
self.line = line
self._key = k
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 132dd83b..635a31e8 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -94,7 +94,7 @@ def update_event_enabled(
return False
-class Init(object):
+class Init:
def __init__(self, ds_deps: Optional[List[str]] = None, reporter=None):
if ds_deps is not None:
self.ds_deps = ds_deps
@@ -234,11 +234,14 @@ class Init(object):
def _read_cfg(self, extra_fns):
no_cfg_paths = helpers.Paths({}, self.datasource)
+ instance_data_file = no_cfg_paths.get_runpath(
+ "instance_data_sensitive"
+ )
merger = helpers.ConfigMerger(
paths=no_cfg_paths,
datasource=self.datasource,
additional_fns=extra_fns,
- base_cfg=fetch_base_config(),
+ base_cfg=fetch_base_config(instance_data_file=instance_data_file),
)
return merger.cfg
@@ -506,7 +509,7 @@ class Init(object):
self._get_ipath(datasource), str(processed_data), 0o600
)
- def _default_handlers(self, opts=None):
+ def _default_handlers(self, opts=None) -> List[handlers.Handler]:
if opts is None:
opts = {}
@@ -526,11 +529,10 @@ class Init(object):
ShellScriptByFreqPartHandler(PER_INSTANCE, **opts),
ShellScriptByFreqPartHandler(PER_ONCE, **opts),
BootHookPartHandler(**opts),
+ JinjaTemplatePartHandler(
+ **opts, sub_handlers=[cloudconfig_handler, shellscript_handler]
+ ),
]
- opts.update(
- {"sub_handlers": [cloudconfig_handler, shellscript_handler]}
- )
- def_handlers.append(JinjaTemplatePartHandler(**opts))
return def_handlers
def _default_userdata_handlers(self):
@@ -958,13 +960,15 @@ def read_runtime_config():
return util.read_conf(RUN_CLOUD_CONFIG)
-def fetch_base_config():
+def fetch_base_config(*, instance_data_file=None) -> dict:
return util.mergemanydict(
[
# builtin config, hardcoded in settings.py.
util.get_builtin_cfg(),
# Anything in your conf.d or 'default' cloud.cfg location.
- util.read_conf_with_confd(CLOUD_CONFIG),
+ util.read_conf_with_confd(
+ CLOUD_CONFIG, instance_data_file=instance_data_file
+ ),
# runtime config. I.e., /run/cloud-init/cloud.cfg
read_runtime_config(),
# Kernel/cmdline parameters override system config
@@ -972,6 +976,3 @@ def fetch_base_config():
],
reverse=True,
)
-
-
-# vi: ts=4 expandtab
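
fetch_base_config() now threads the sensitive instance-data path into read_conf_with_confd(), so /etc/cloud configuration can be jinja-rendered against previously written instance data; with reverse=True the later mergemanydict entries (runtime config, kernel cmdline) still take precedence. A sketch of the call, using the default run-dir path that get_runpath("instance_data_sensitive") resolves to:

    from cloudinit import stages

    cfg = stages.fetch_base_config(
        instance_data_file="/run/cloud-init/instance-data-sensitive.json"
    )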
diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py
index e23b6599..a1bf9934 100644
--- a/cloudinit/temp_utils.py
+++ b/cloudinit/temp_utils.py
@@ -6,12 +6,28 @@ import os
import shutil
import tempfile
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
_TMPDIR = None
_ROOT_TMPDIR = "/run/cloud-init/tmp"
_EXE_ROOT_TMPDIR = "/var/tmp/cloud-init"
-def _tempfile_dir_arg(odir=None, needs_exe=False):
+def get_tmp_ancestor(odir=None, needs_exe: bool = False):
+ if odir is not None:
+ return odir
+ if needs_exe:
+ return _EXE_ROOT_TMPDIR
+ if _TMPDIR:
+ return _TMPDIR
+ if os.getuid() == 0:
+ return _ROOT_TMPDIR
+ return os.environ.get("TMPDIR", "/tmp")
+
+
+def _tempfile_dir_arg(odir=None, needs_exe: bool = False):
"""Return the proper 'dir' argument for tempfile functions.
When root, cloud-init will use /run/cloud-init/tmp to avoid
@@ -25,29 +41,23 @@ def _tempfile_dir_arg(odir=None, needs_exe=False):
@param needs_exe: Boolean specifying whether or not exe permissions are
needed for tempdir. This is needed because /run is mounted noexec.
"""
- if odir is not None:
- return odir
-
- if needs_exe:
- tdir = _EXE_ROOT_TMPDIR
- if not os.path.isdir(tdir):
- os.makedirs(tdir)
- os.chmod(tdir, 0o1777)
- return tdir
-
- global _TMPDIR
- if _TMPDIR:
- return _TMPDIR
-
- if os.getuid() == 0:
- tdir = _ROOT_TMPDIR
- else:
- tdir = os.environ.get("TMPDIR", "/tmp")
+ tdir = get_tmp_ancestor(odir, needs_exe)
if not os.path.isdir(tdir):
os.makedirs(tdir)
os.chmod(tdir, 0o1777)
- _TMPDIR = tdir
+ if needs_exe:
+ if util.has_mount_opt(tdir, "noexec"):
+ LOG.warning(
+ "Requested temporal dir with exe permission `%s` is"
+ " mounted as noexec",
+ tdir,
+ )
+
+ if odir is None and not needs_exe:
+ global _TMPDIR
+ _TMPDIR = tdir
+
return tdir
@@ -93,18 +103,14 @@ def tempdir(rmtree_ignore_errors=False, **kwargs):
shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors)
-def mkdtemp(**kwargs):
- kwargs["dir"] = _tempfile_dir_arg(
- kwargs.pop("dir", None), kwargs.pop("needs_exe", False)
- )
- return tempfile.mkdtemp(**kwargs)
+def mkdtemp(dir=None, needs_exe: bool = False, **kwargs):
+ dir = _tempfile_dir_arg(dir, needs_exe)
+ return tempfile.mkdtemp(dir=dir, **kwargs)
-def mkstemp(**kwargs):
- kwargs["dir"] = _tempfile_dir_arg(
- kwargs.pop("dir", None), kwargs.pop("needs_exe", False)
- )
- return tempfile.mkstemp(**kwargs)
+def mkstemp(dir=None, needs_exe: bool = False, **kwargs):
+ dir = _tempfile_dir_arg(dir, needs_exe)
+ return tempfile.mkstemp(dir=dir, **kwargs)
# vi: ts=4 expandtab
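
mkdtemp()/mkstemp() now take dir and needs_exe explicitly instead of popping them out of **kwargs, and needs_exe directories are sanity-checked with util.has_mount_opt() for a noexec mount. A sketch of requesting an executable scratch directory (as root this lands under /var/tmp/cloud-init):

    from cloudinit import temp_utils

    # Created 0o1777 if missing; a warning is logged if the backing
    # filesystem is mounted noexec.
    exe_dir = temp_utils.mkdtemp(needs_exe=True)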
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 291b8d4d..3d4e4639 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -90,7 +90,7 @@ def read_file_or_url(url, **kwargs) -> Union["FileResponse", "UrlResponse"]:
# Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference.
-class StringResponse(object):
+class StringResponse:
def __init__(self, contents, code=200):
self.code = code
self.headers = {}
@@ -112,7 +112,7 @@ class FileResponse(StringResponse):
self.url = path
-class UrlResponse(object):
+class UrlResponse:
def __init__(self, response: requests.Response):
self._response = response
@@ -680,7 +680,7 @@ def wait_for_url(
return False, None
-class OauthUrlHelper(object):
+class OauthUrlHelper:
def __init__(
self,
consumer_key=None,
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 05c66741..51e4cd63 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -74,7 +74,7 @@ def _handle_error(error_message, source_exception=None):
LOG.warning(error_message)
-class UserDataProcessor(object):
+class UserDataProcessor:
def __init__(self, paths):
self.paths = paths
self.ssl_details = util.fetch_ssl_details(paths)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 77e7f66b..078ce1c2 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -35,7 +35,7 @@ from base64 import b64decode, b64encode
from collections import deque, namedtuple
from errno import EACCES, ENOENT
from functools import lru_cache, total_ordering
-from typing import Callable, List, TypeVar
+from typing import Callable, Deque, Dict, List, TypeVar
from urllib import parse
from cloudinit import importer
@@ -162,7 +162,7 @@ def fully_decoded_payload(part):
return cte_payload
-class SeLinuxGuard(object):
+class SeLinuxGuard:
def __init__(self, path, recursive=False):
# Late import since it might not always
# be possible to use this
@@ -287,15 +287,52 @@ def rand_dict_key(dictionary, postfix=None):
return newkey
-def read_conf(fname):
+def read_conf(fname, *, instance_data_file=None) -> Dict:
+ """Read a yaml config with optional template, and convert to dict"""
+ # Avoid circular import
+ from cloudinit.handlers.jinja_template import (
+ JinjaLoadError,
+ NotJinjaError,
+ render_jinja_payload_from_file,
+ )
+
try:
- return load_yaml(load_file(fname), default={})
+ config_file = load_file(fname)
except IOError as e:
if e.errno == ENOENT:
return {}
else:
raise
+ if instance_data_file and os.path.exists(instance_data_file):
+ try:
+ config_file = render_jinja_payload_from_file(
+ config_file,
+ fname,
+ instance_data_file,
+ )
+ LOG.debug(
+ "Applied instance data in '%s' to "
+ "configuration loaded from '%s'",
+ instance_data_file,
+ fname,
+ )
+ except NotJinjaError:
+ # A log isn't appropriate here as we generally expect most
+ # cloud.cfgs to not be templated. The other path is logged
+ pass
+ except JinjaLoadError as e:
+ LOG.warning(
+ "Could not apply Jinja template '%s' to '%s'. "
+ "Exception: %s",
+ instance_data_file,
+ config_file,
+ repr(e),
+ )
+ if config_file is None:
+ return {}
+ return load_yaml(config_file, default={}) # pyright: ignore
+
# Merges X lists, and then keeps the
# unique ones, but orders by sort order
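
read_conf() can now render the file as a jinja template against an instance-data JSON file before YAML-loading it; files opt in with cloud-init's "## template:jinja" first-line marker, and plain files fall through via the NotJinjaError branch. A sketch, assuming a hypothetical jinja-enabled drop-in:

    # /etc/cloud/cloud.cfg.d/99-example.cfg (hypothetical) contains:
    #   ## template:jinja
    #   hostname: {{ v1.cloud_name }}-host
    from cloudinit import util

    cfg = util.read_conf(
        "/etc/cloud/cloud.cfg.d/99-example.cfg",
        instance_data_file="/run/cloud-init/instance-data-sensitive.json",
    )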
@@ -599,6 +636,7 @@ def _get_variant(info):
"debian",
"eurolinux",
"fedora",
+ "mariner",
"miraclelinux",
"openeuler",
"openmandriva",
@@ -976,7 +1014,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
return (md, ud, vd)
-def read_conf_d(confd):
+def read_conf_d(confd, *, instance_data_file=None) -> dict:
"""Read configuration directory."""
# Get reverse sorted list (later trumps newer)
confs = sorted(os.listdir(confd), reverse=True)
@@ -991,7 +1029,12 @@ def read_conf_d(confd):
cfgs = []
for fn in confs:
try:
- cfgs.append(read_conf(os.path.join(confd, fn)))
+ cfgs.append(
+ read_conf(
+ os.path.join(confd, fn),
+ instance_data_file=instance_data_file,
+ )
+ )
except OSError as e:
if e.errno == EACCES:
LOG.warning(
@@ -1001,18 +1044,29 @@ def read_conf_d(confd):
return mergemanydict(cfgs)
-def read_conf_with_confd(cfgfile):
- cfgs = deque()
+def read_conf_with_confd(cfgfile, *, instance_data_file=None) -> dict:
+ """Read yaml file along with optional ".d" directory, return merged config
+
+ Given a yaml file, load the file as a dictionary. Additionally, if there
+ exists a same-named directory with .d extension, read all files from
+ that directory in order and return the merged config. The template
+ file is optional and will be applied to any applicable jinja file
+ in the configs.
+
+ For example, this function can read both /etc/cloud/cloud.cfg and all
+ files in /etc/cloud/cloud.cfg.d and merge all configs into a single dict.
+ """
+ cfgs: Deque[Dict] = deque()
cfg: dict = {}
try:
- cfg = read_conf(cfgfile)
+ cfg = read_conf(cfgfile, instance_data_file=instance_data_file)
except OSError as e:
if e.errno == EACCES:
LOG.warning("REDACTED config part %s for non-root user", cfgfile)
else:
cfgs.append(cfg)
- confd = False
+ confd = ""
if "conf_d" in cfg:
confd = cfg["conf_d"]
if confd:
@@ -1023,12 +1077,12 @@ def read_conf_with_confd(cfgfile):
)
else:
confd = str(confd).strip()
- elif os.path.isdir("%s.d" % cfgfile):
- confd = "%s.d" % cfgfile
+ elif os.path.isdir(f"{cfgfile}.d"):
+ confd = f"{cfgfile}.d"
if confd and os.path.isdir(confd):
# Conf.d settings override input configuration
- confd_cfg = read_conf_d(confd)
+ confd_cfg = read_conf_d(confd, instance_data_file=instance_data_file)
cfgs.appendleft(confd_cfg)
return mergemanydict(cfgs)
@@ -2592,6 +2646,11 @@ def get_mount_info(path, log=LOG, get_mnt_opts=False):
return parse_mount(path)
+def has_mount_opt(path, opt: str) -> bool:
+ *_, mnt_opts = get_mount_info(path, get_mnt_opts=True)
+ return opt in mnt_opts.split(",")
+
+
T = TypeVar("T")
@@ -2875,6 +2934,10 @@ def mount_is_read_write(mount_point):
def udevadm_settle(exists=None, timeout=None):
"""Invoke udevadm settle with optional exists and timeout parameters"""
+ if not subp.which("udevadm"):
+ # a distro, such as Alpine, may not have udev installed if
+ # it relies on a udev alternative such as mdev/mdevd.
+ return
settle_cmd = ["udevadm", "settle"]
if exists:
# skip the settle if the requested path already exists
@@ -2947,6 +3010,17 @@ class Version(namedtuple("Version", ["major", "minor", "patch", "rev"])):
and self.rev == other.rev
)
+ def __iter__(self):
+ """Iterate over the version (drop sentinels)"""
+ for n in (self.major, self.minor, self.patch, self.rev):
+ if n != -1:
+ yield str(n)
+ else:
+ break
+
+ def __str__(self):
+ return ".".join(self)
+
def _compare_version(self, other) -> int:
"""
return values:
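
The new __iter__/__str__ on Version drop the -1 sentinels, so partially specified versions stringify cleanly (assuming -1 marks an unset component, as the __iter__ docstring says):

    from cloudinit.util import Version

    assert str(Version(22, 4, -1, -1)) == "22.4"
    assert str(Version(1, 2, 3, -1)) == "1.2.3"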
diff --git a/cloudinit/version.py b/cloudinit/version.py
index ec2621f7..b9b42af3 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "22.3"
+__VERSION__ = "22.4"
_PACKAGED_VERSION = "@@PACKAGED_VERSION@@"
FEATURES = [
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index a6096f47..0f234a7d 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -1,7 +1,9 @@
## template:jinja
+
# The top level settings are used as module
-# and system configuration.
+# and base configuration.
{% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %}
+{% set is_rhel = variant in ["rhel", "centos"] %}
{% if is_bsd %}
syslog_fix_perms: root:wheel
{% elif variant in ["suse"] %}
@@ -32,9 +34,9 @@ disable_root: false
disable_root: true
{% endif %}
-{% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux",
- "fedora", "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "virtuozzo"] %}
-{% if variant == "rhel" %}
+{% if variant in ["almalinux", "alpine", "amazon", "cloudlinux", "eurolinux",
+ "fedora", "miraclelinux", "openEuler", "openmandriva", "rocky", "virtuozzo"] or is_rhel %}
+{% if is_rhel %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2']
{% else %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
@@ -70,7 +72,7 @@ network:
config: disabled
{% endif %}
-{% if variant == "rhel" %}
+{% if is_rhel %}
# Default redhat settings:
ssh_deletekeys: true
ssh_genkeytypes: ['rsa', 'ecdsa', 'ed25519']
@@ -122,17 +124,17 @@ cloud_config_modules:
{% endif %}
{% if variant not in ["photon"] %}
- ssh-import-id
-{% if variant not in ["rhel"] %}
+{% if not is_rhel %}
- keyboard
{% endif %}
- locale
{% endif %}
- set-passwords
-{% if variant in ["rhel"] %}
+{% if is_rhel %}
- rh_subscription
{% endif %}
-{% if variant in ["rhel", "fedora", "openmandriva", "photon"] %}
-{% if variant not in ["photon"] %}
+{% if variant in ["fedora", "mariner", "openmandriva", "photon"] or is_rhel %}
+{% if variant not in ["mariner", "photon"] %}
- spacewalk
{% endif %}
- yum-add-repo
@@ -151,9 +153,7 @@ cloud_config_modules:
{% if variant in ["alpine"] %}
- apk-configure
{% endif %}
-{% if variant not in ["freebsd", "netbsd"] %}
- ntp
-{% endif %}
- timezone
- disable-ec2-metadata
- runcmd
@@ -197,9 +197,9 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
-{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian",
- "eurolinux", "fedora", "freebsd", "gentoo", "netbsd", "miraclelinux", "openbsd", "openEuler",
- "openmandriva", "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %}
+{% if variant in ["almalinux", "alpine", "amazon", "arch", "cloudlinux", "debian",
+ "eurolinux", "fedora", "freebsd", "gentoo", "netbsd", "mariner", "miraclelinux", "openbsd", "openEuler",
+ "openmandriva", "photon", "rocky", "suse", "ubuntu", "virtuozzo"] or is_rhel %}
distro: {{ variant }}
{% elif variant in ["dragonfly"] %}
distro: dragonflybsd
@@ -252,15 +252,15 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "eurolinux",
- "fedora", "gentoo", "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "suse", "virtuozzo"] %}
+{% elif variant in ["almalinux", "alpine", "amazon", "arch", "cloudlinux", "eurolinux",
+ "fedora", "gentoo", "miraclelinux", "openEuler", "openmandriva", "rocky", "suse", "virtuozzo"] or is_rhel %}
# Default user name + that default users groups (if added/used)
default_user:
{% if variant == "amazon" %}
name: ec2-user
lock_passwd: True
gecos: EC2 Default User
-{% elif variant == "rhel" %}
+{% elif is_rhel %}
name: cloud-user
lock_passwd: true
gecos: Cloud User
@@ -285,7 +285,7 @@ system_info:
groups: [wheel, users]
{% elif variant == "openmandriva" %}
groups: [wheel, users, systemd-journal]
-{% elif variant == "rhel" %}
+{% elif is_rhel %}
groups: [adm, systemd-journal]
{% else %}
groups: [wheel, adm, systemd-journal]
@@ -356,6 +356,22 @@ system_info:
# In Photon, we have default network settings, hence if network settings are
# not explicitly given in metadata, don't use fallback network config.
disable_fallback_netcfg: true
+{% elif variant in ["mariner"] %}
+ default_user:
+ name: mariner
+ lock_passwd: True
+ gecos: MarinerOS
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/bash
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+ network:
+ renderers: ['networkd']
+
+ ssh_svcname: sshd
{% endif %}
{% if variant in ["freebsd", "netbsd", "openbsd"] %}
network:
@@ -363,7 +379,7 @@ system_info:
{% elif variant in ["dragonfly"] %}
network:
renderers: ['freebsd']
-{% elif variant in ["rhel", "fedora"] %}
+{% elif variant in ["fedora"] or is_rhel %}
network:
renderers: ['netplan', 'network-manager', 'networkd', 'sysconfig', 'eni']
{% elif variant == "openmandriva" %}
diff --git a/conftest.py b/conftest.py
index 3979eb0a..83dfca68 100644
--- a/conftest.py
+++ b/conftest.py
@@ -7,7 +7,6 @@ Any imports that are performed at the top-level here must be installed wherever
any of these tests run: that is to say, they must be listed in
``integration-requirements.txt`` and in ``test-requirements.txt``.
"""
-import os
from unittest import mock
import pytest
@@ -167,27 +166,11 @@ def fixture_utils():
@pytest.fixture
-def httpretty():
- """
- Enable HTTPretty for duration of the testcase, resetting before and after.
-
- This will also ensure allow_net_connect is set to False, and temporarily
- unset http_proxy in os.environ if present (to work around
- https://github.com/gabrielfalcao/HTTPretty/issues/122).
- """
- import httpretty as _httpretty
-
- restore_proxy = os.environ.pop("http_proxy", None)
- _httpretty.HTTPretty.allow_net_connect = False
- _httpretty.reset()
- _httpretty.enable()
-
- yield _httpretty
+def mocked_responses():
+ import responses as _responses
- _httpretty.disable()
- _httpretty.reset()
- if restore_proxy is not None:
- os.environ["http_proxy"] = restore_proxy
+ with _responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
+ yield rsps
@pytest.fixture
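
The httpretty fixture is gone in favor of the responses library. A hypothetical test using the replacement fixture (URL and payload are illustrative):

    import requests
    import responses

    def test_fetch(mocked_responses):
        mocked_responses.add(
            responses.GET,
            "http://169.254.169.254/api/v1/metadata",
            json={"instance-id": "i-1234"},
        )
        resp = requests.get("http://169.254.169.254/api/v1/metadata")
        assert resp.json()["instance-id"] == "i-1234"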
diff --git a/debian/changelog b/debian/changelog
index 711fa812..51a7c25c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,13 @@
+cloud-init (22.4-0ubuntu1) lunar; urgency=medium
+
+ * d/control: drop python3-httpretty from Build-Depends
+ * d/cloud-init.templates: Add NWCS to datasource list
+ * Upstream snapshot based on 22.4 upstream release. List of changes
+ from upstream can be found at
+ https://raw.githubusercontent.com/canonical/cloud-init/22.4/ChangeLog
+
+ -- James Falcon <james.falcon@canonical.com> Tue, 15 Nov 2022 14:23:44 -0600
+
cloud-init (22.3-13-g70ce6442-0ubuntu1~22.10.1) kinetic; urgency=medium
* New upstream snapshot.
@@ -14,7 +24,7 @@ cloud-init (22.3-13-g70ce6442-0ubuntu1~22.10.1) kinetic; urgency=medium
[Emanuele Giuseppe Esposito]
+ Fix check_format_tip (#1679)
- -- Chad Smith <chad.smith@canonical.com> Tue, 30 Aug 2022 13:29:28 -0600
+ -- Brett Holman <brett.holman@canonical.com> Tue, 30 Aug 2022 14:28:27 -0600
cloud-init (22.3-3-g9f0efc47-0ubuntu1~22.10.1) kinetic; urgency=medium
diff --git a/debian/cloud-init.templates b/debian/cloud-init.templates
index 3cf4eafb..26f920f4 100644
--- a/debian/cloud-init.templates
+++ b/debian/cloud-init.templates
@@ -1,8 +1,8 @@
Template: cloud-init/datasources
Type: multiselect
-Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, LXD, None
-Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, LXD, None
-__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, VMware: reads data from guestinfo table or env vars, Vultr: Vultr Cloud, LXD: Reads /dev/lxd/sock representation of instance data, None: Failsafe datasource
+Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, LXD, NWCS, None
+Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, Hetzner, IBMCloud, Oracle, Exoscale, RbxCloud, UpCloud, VMware, Vultr, LXD, NWCS, None
+__Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, Hetzner: Hetzner Cloud, IBMCloud: IBM Cloud. Previously softlayer or bluemix., Oracle: Oracle Compute Infrastructure, Exoscale: Exoscale, RbxCloud: HyperOne and Rootbox platforms, UpCloud: UpCloud, VMware: reads data from guestinfo table or env vars, Vultr: Vultr Cloud, LXD: Reads /dev/lxd/sock representation of instance data, NWCS: NWCS, None: Failsafe datasource
_Description: Which data sources should be searched?
Cloud-init supports searching different "Data Sources" for information
that it uses to configure a cloud instance.
diff --git a/debian/control b/debian/control
index a4c9e9a7..9e3193c1 100644
--- a/debian/control
+++ b/debian/control
@@ -11,7 +11,6 @@ Build-Depends: debhelper-compat (= 13),
python3,
python3-configobj,
python3-debconf,
- python3-httpretty,
python3-jinja2,
python3-jsonpatch,
python3-jsonschema,
diff --git a/debian/po/templates.pot b/debian/po/templates.pot
index ba0fb51b..3171457c 100644
--- a/debian/po/templates.pot
+++ b/debian/po/templates.pot
@@ -176,6 +176,12 @@ msgstr ""
#. Type: multiselect
#. Choices
#: ../cloud-init.templates:1001
+msgid "NWCS: NWCS"
+msgstr ""
+
+#. Type: multiselect
+#. Choices
+#: ../cloud-init.templates:1001
msgid "None: Failsafe datasource"
msgstr ""
diff --git a/doc-requirements.txt b/doc-requirements.txt
index 3207e6c6..6f48062e 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -1,11 +1,6 @@
-# doc8 1.0.0 depends on docutils 0.18.1 or later which added Node.findall()
-doc8==0.11.2
+doc8
+furo
m2r2
-sphinx==4.3.0
-sphinx_rtd_theme==1.0.0
pyyaml
-sphinx-panels
-
-# Indirect dependencies
-jinja2<3.1.0 # https://github.com/readthedocs/readthedocs.org/issues/9037
-docutils==0.16 # https://github.com/readthedocs/sphinx_rtd_theme/issues/1115
+sphinx
+sphinx-design
diff --git a/doc/examples/cloud-config-ansible-controller.txt b/doc/examples/cloud-config-ansible-controller.txt
new file mode 100644
index 00000000..389f8f88
--- /dev/null
+++ b/doc/examples/cloud-config-ansible-controller.txt
@@ -0,0 +1,140 @@
+#cloud-config
+#
+# Demonstrate setting up an ansible controller host on boot.
+# This example installs a playbook repository from a remote private repository
+# and then runs two of the plays.
+
+packages_update: true
+packages_upgrade: true
+packages:
+ - git
+ - python3-pip
+
+# Set up an ansible user
+# ----------------------
+# In this case I give the local ansible user passwordless sudo so that ansible
+# may write to a local root-only file.
+users:
+- name: ansible
+ gecos: Ansible User
+ shell: /bin/bash
+ groups: users,admin,wheel,lxd
+ sudo: ALL=(ALL) NOPASSWD:ALL
+
+# Initialize lxd using cloud-init.
+# --------------------------------
+# In this example, an lxd container is
+# started using ansible on boot, so having lxd initialized is required.
+lxd:
+ init:
+ storage_backend: dir
+
+# Configure and run ansible on boot
+# ---------------------------------
+# Install ansible using pip and ensure that the community.general
+# collection is installed [1].
+# Use a deploy key to clone a remote private repository then run two playbooks.
+# The first playbook starts a lxd container and creates a new inventory file.
+# The second playbook connects to and configures the container using ansible.
+# The public version of the playbooks can be inspected here [2]
+#
+# [1] community.general is likely already installed by pip
+# [2] https://github.com/holmanb/ansible-lxd-public
+#
+ansible:
+ install_method: pip
+ package_name: ansible
+ run_user: ansible
+ galaxy:
+ actions:
+ - ["ansible-galaxy", "collection", "install", "community.general"]
+
+ setup_controller:
+ repositories:
+ - path: /home/ansible/my-repo/
+ source: git@github.com:holmanb/ansible-lxd-private.git
+ run_ansible:
+ - playbook_dir: /home/ansible/my-repo
+ playbook_name: start-lxd.yml
+ timeout: 120
+ forks: 1
+ private_key: /home/ansible/.ssh/id_rsa
+ - playbook_dir: /home/ansible/my-repo
+ playbook_name: configure-lxd.yml
+ become_user: ansible
+ timeout: 120
+ forks: 1
+ private_key: /home/ansible/.ssh/id_rsa
+ inventory: new_ansible_hosts
+
+# Write a deploy key to the filesystem for ansible.
+# -------------------------------------------------
+# This deploy key is tied to a private GitHub repository [1].
+# It exists to demonstrate deploy key usage in ansible;
+# a duplicate public copy of the repository exists here [2].
+#
+# [1] https://github.com/holmanb/ansible-lxd-private
+# [2] https://github.com/holmanb/ansible-lxd-public
+#
+write_files:
+ - path: /home/ansible/.ssh/known_hosts
+ owner: ansible:ansible
+ permissions: 0o600
+ defer: true
+ content: |
+ |1|YJEFAk6JjnXpUjUSLFiBQS55W9E=|OLNePOn3eBa1PWhBBmt5kXsbGM4= ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
+ |1|PGGnpCpqi0aakERS4BWnYxMkMwM=|Td0piZoS4ZVC0OzeuRwKcH1MusM= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
+ |1|OJ89KrsNcFTOvoCP/fPGKpyUYFo=|cu7mNzF+QB/5kR0spiYmUJL7DAI= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
+
+ - path: /home/ansible/.ssh/id_rsa
+ owner: ansible:ansible
+ permissions: 0o600
+ defer: true
+ encoding: base64
+ content: |
+ LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFB
+ QUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUJsd0FBQUFkemMyZ3RjbgpOaEFBQUFB
+ d0VBQVFBQUFZRUEwUWlRa05WQS9VTEpWZzBzT1Q4TEwyMnRGckg5YVR1SWFNT1FiVFdtWjlNUzJh
+ VTZ0cDZoClJDYklWSkhmOHdsaGV3MXNvWmphWVVQSFBsUHNISm5UVlhJTnFTTlpEOGF0Rldjd1gy
+ ZTNBOElZNEhpN0NMMDE3MVBoMVUKYmJGNGVIT1JaVkY2VVkzLzhmbXQ3NmhVYnpiRVhkUXhQdVdh
+ a0IyemxXNTdFclpOejJhYVdnY2pJUGdHV1RNZWVqbEpOcQpXUW9MNlFzSStpeUlzYXNMc1RTajha
+ aVgrT1VjanJEMUY4QXNKS3ZWQStKbnVZNUxFeno1TGQ2SGxGc05XVWtoZkJmOWVOClpxRnJCc1Vw
+ M2VUY1FtejFGaHFFWDJIQjNQT3VSTzlKemVGcTJaRE8wUlNQN09acjBMYm8vSFVTK3V5VkJNTDNi
+ eEF6dEIKQWM5dFJWZjRqcTJuRjNkcUpwVTFFaXZzR0sxaHJZc0VNQklLK0srVzRwc1F5c3ZTL0ZK
+ V2lXZmpqWVMwei9IbkV4MkpHbApOUXUrYkMxL1dXSGVXTGFvNGpSckRSZnNIVnVscTE2MElsbnNx
+ eGl1MmNHd081V29Fc1NHdThucXB5ZzQzWkhDYjBGd21CCml6UFFEQVNsbmlXanFjS21mblRycHpB
+ eTNlVldhd3dsTnBhUWtpZFRBQUFGZ0dLU2o4ZGlrby9IQUFBQUIzTnphQzF5YzIKRUFBQUdCQU5F
+ SWtKRFZRUDFDeVZZTkxEay9DeTl0clJheC9XazdpR2pEa0cwMXBtZlRFdG1sT3JhZW9VUW15RlNS
+ My9NSgpZWHNOYktHWTJtRkR4ejVUN0J5WjAxVnlEYWtqV1EvR3JSVm5NRjludHdQQ0dPQjR1d2k5
+ TmU5VDRkVkcyeGVIaHprV1ZSCmVsR04vL0g1cmUrb1ZHODJ4RjNVTVQ3bG1wQWRzNVZ1ZXhLMlRj
+ OW1tbG9ISXlENEJsa3pIbm81U1RhbGtLQytrTENQb3MKaUxHckM3RTBvL0dZbC9qbEhJNnc5UmZB
+ TENTcjFRUGlaN21PU3hNOCtTM2VoNVJiRFZsSklYd1gvWGpXYWhhd2JGS2QzawozRUpzOVJZYWhG
+ OWh3ZHp6cmtUdlNjM2hhdG1RenRFVWorem1hOUMyNlB4MUV2cnNsUVRDOTI4UU03UVFIUGJVVlgr
+ STZ0CnB4ZDNhaWFWTlJJcjdCaXRZYTJMQkRBU0N2aXZsdUtiRU1yTDB2eFNWb2xuNDQyRXRNL3g1
+ eE1kaVJwVFVMdm13dGYxbGgKM2xpMnFPSTBhdzBYN0IxYnBhdGV0Q0paN0tzWXJ0bkJzRHVWcUJM
+ RWhydko2cWNvT04yUndtOUJjSmdZc3owQXdFcFo0bApvNm5DcG41MDY2Y3dNdDNsVm1zTUpUYVdr
+ SkluVXdBQUFBTUJBQUVBQUFHQUV1ejc3SHU5RUVaeXVqTE9kVG5BVzlhZlJ2ClhET1pBNnBTN3lX
+ RXVmanc1Q1NsTUx3aXNSODN5d3cwOXQxUVd5dmhScUV5WW12T0JlY3NYZ2FTVXRuWWZmdFd6NDRh
+ cHkKL2dRWXZNVkVMR0thSkFDL3E3dmpNcEd5cnhVUGt5TE1oY2tBTFUyS1lnVisvcmovajZwQk1l
+ VmxjaG1rM3Bpa1lyZmZVWApKRFk5OTBXVk8xOTREbTBidUxSekp2Zk1LWUYyQmNmRjRUdmFyak9Y
+ V0F4U3VSOHd3dzA1MG9KOEhkS2FoVzdDbTVTMHBvCkZSbk5YRkdNbkxBNjJ2TjAwdkpXOFY3ajd2
+ dWk5dWtCYmhqUldhSnVZNXJkRy9VWW16QWU0d3ZkSUVucGs5eEluNkpHQ3AKRlJZVFJuN2xUaDUr
+ L1FsUTZGWFJQOElyMXZYWkZuaEt6bDBLOFZxaDJzZjRNNzlNc0lVR0FxR3hnOXhkaGpJYTVkbWdw
+ OApOMThJRURvTkVWS1ViS3VLZS9aNXlmOFo5dG1leGZIMVl0dGptWE1Pb2pCdlVISWpSUzVoZEk5
+ TnhuUEdSTFkya2pBemNtCmdWOVJ2M3Z0ZEYvK3phbGszZkFWTGVLOGhYSytkaS83WFR2WXBmSjJF
+ WkJXaU5yVGVhZ2ZOTkdpWXlkc1F5M3pqWkFBQUEKd0JOUmFrN1VycW5JSE1abjdwa0NUZ2NlYjFN
+ ZkJ5YUZ0bE56ZCtPYmFoNTRIWUlRajVXZFpUQkFJVFJlTVpOdDlTNU5BUgpNOHNRQjhVb1pQYVZT
+ QzNwcElMSU9mTGhzNktZajZSckdkaVl3eUloTVBKNWtSV0Y4eEdDTFVYNUNqd0gyRU9xN1hoSVd0
+ Ck13RUZ0ZC9nRjJEdTdIVU5GUHNaR256SjNlN3BES0RuRTd3MmtoWjhDSXBURmdENzY5dUJZR0F0
+ azQ1UVlURG81SnJvVk0KWlBEcTA4R2IvUmhJZ0pMbUlwTXd5cmVWcExMTGU4U3dvTUpKK3JpaG1u
+ Slp4TzhnQUFBTUVBMGxoaUtlemVUc2hodDR4dQpyV2MwTnh4RDg0YTI5Z1NHZlRwaERQT3JsS1NF
+ WWJrU1hoanFDc0FaSGQ4UzhrTXIzaUY2cG9PazNJV1N2Rko2bWJkM2llCnFkUlRnWEg5VGh3azRL
+ Z3BqVWhOc1F1WVJIQmJJNTlNbytCeFNJMUIxcXptSlNHZG1DQkw1NHd3elptRktEUVBRS1B4aUwK
+ bjBNbGM3R29vaURNalQxdGJ1Vy9PMUVMNUVxVFJxd2dXUFRLaEJBNnI0UG5HRjE1MGhaUklNb29a
+ a0Qyelg2YjFzR29qawpRcHZLa0V5a1R3bktDekY1VFhPOCt3SjNxYmNFbzlBQUFBd1FEK1owcjY4
+ YzJZTU5wc215ajNaS3RaTlBTdkpOY0xteUQvCmxXb05KcTNkakpONHMySmJLOGw1QVJVZFczeFNG
+ RURJOXl4L3dwZnNYb2FxV255Z1AzUG9GdzJDTTRpMEVpSml5dnJMRlUKcjNKTGZEVUZSeTNFSjI0
+ UnNxYmlnbUVzZ1FPelRsM3hmemVGUGZ4Rm9PaG9rU3ZURzg4UFFqaTFBWUh6NWtBN3A2WmZhegpP
+ azExckpZSWU3K2U5QjBsaGt1MEFGd0d5cWxXUW1TL01oSXBuakhJazV0UDRoZUhHU216S1FXSkRi
+ VHNrTldkNmFxMUc3CjZIV2ZEcFg0SGdvTThBQUFBTGFHOXNiV0Z1WWtCaGNtTT0KLS0tLS1FTkQg
+ T1BFTlNTSCBQUklWQVRFIEtFWS0tLS0tCg==
diff --git a/doc/examples/cloud-config-ansible-managed.txt b/doc/examples/cloud-config-ansible-managed.txt
new file mode 100644
index 00000000..b6508d21
--- /dev/null
+++ b/doc/examples/cloud-config-ansible-managed.txt
@@ -0,0 +1,64 @@
+#cloud-config
+#
+# A common use-case for cloud-init is to bootstrap user and ssh
+# settings to be managed by a remote configuration management tool,
+# such as ansible.
+#
+# This example assumes a default Ubuntu cloud image, which should contain
+# the required software to be managed remotely by Ansible.
+#
+ssh_pwauth: false
+
+users:
+- name: ansible
+ gecos: Ansible User
+ groups: users,admin,wheel
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ lock_passwd: true
+ ssh_authorized_keys:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDRCJCQ1UD9QslWDSw5Pwsvba0Wsf1pO4how5BtNaZn0xLZpTq2nqFEJshUkd/zCWF7DWyhmNphQ8c+U+wcmdNVcg2pI1kPxq0VZzBfZ7cDwhjgeLsIvTXvU+HVRtsXh4c5FlUXpRjf/x+a3vqFRvNsRd1DE+5ZqQHbOVbnsStk3PZppaByMg+AZZMx56OUk2pZCgvpCwj6LIixqwuxNKPxmJf45RyOsPUXwCwkq9UD4me5jksTPPkt3oeUWw1ZSSF8F/141moWsGxSnd5NxCbPUWGoRfYcHc865E70nN4WrZkM7RFI/s5mvQtuj8dRL67JUEwvdvEDO0EBz21FV/iOracXd2omlTUSK+wYrWGtiwQwEgr4r5bimxDKy9L8UlaJZ+ONhLTP8ecTHYkaU1C75sLX9ZYd5YtqjiNGsNF+wdW6WrXrQiWeyrGK7ZwbA7lagSxIa7yeqnKDjdkcJvQXCYGLM9AMBKWeJaOpwqZ+dOunMDLd5VZrDCU2lpCSJ1M="
+
+
+# use the following passwordless demonstration key for testing or
+# replace with your own key pair
+#
+# -----BEGIN OPENSSH PRIVATE KEY-----
+# b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+# NhAAAAAwEAAQAAAYEA0QiQkNVA/ULJVg0sOT8LL22tFrH9aTuIaMOQbTWmZ9MS2aU6tp6h
+# RCbIVJHf8wlhew1soZjaYUPHPlPsHJnTVXINqSNZD8atFWcwX2e3A8IY4Hi7CL0171Ph1U
+# bbF4eHORZVF6UY3/8fmt76hUbzbEXdQxPuWakB2zlW57ErZNz2aaWgcjIPgGWTMeejlJNq
+# WQoL6QsI+iyIsasLsTSj8ZiX+OUcjrD1F8AsJKvVA+JnuY5LEzz5Ld6HlFsNWUkhfBf9eN
+# ZqFrBsUp3eTcQmz1FhqEX2HB3POuRO9JzeFq2ZDO0RSP7OZr0Lbo/HUS+uyVBML3bxAztB
+# Ac9tRVf4jq2nF3dqJpU1EivsGK1hrYsEMBIK+K+W4psQysvS/FJWiWfjjYS0z/HnEx2JGl
+# NQu+bC1/WWHeWLao4jRrDRfsHVulq160Ilnsqxiu2cGwO5WoEsSGu8nqpyg43ZHCb0FwmB
+# izPQDASlniWjqcKmfnTrpzAy3eVWawwlNpaQkidTAAAFgGKSj8diko/HAAAAB3NzaC1yc2
+# EAAAGBANEIkJDVQP1CyVYNLDk/Cy9trRax/Wk7iGjDkG01pmfTEtmlOraeoUQmyFSR3/MJ
+# YXsNbKGY2mFDxz5T7ByZ01VyDakjWQ/GrRVnMF9ntwPCGOB4uwi9Ne9T4dVG2xeHhzkWVR
+# elGN//H5re+oVG82xF3UMT7lmpAds5VuexK2Tc9mmloHIyD4BlkzHno5STalkKC+kLCPos
+# iLGrC7E0o/GYl/jlHI6w9RfALCSr1QPiZ7mOSxM8+S3eh5RbDVlJIXwX/XjWahawbFKd3k
+# 3EJs9RYahF9hwdzzrkTvSc3hatmQztEUj+zma9C26Px1EvrslQTC928QM7QQHPbUVX+I6t
+# pxd3aiaVNRIr7BitYa2LBDASCvivluKbEMrL0vxSVoln442EtM/x5xMdiRpTULvmwtf1lh
+# 3li2qOI0aw0X7B1bpatetCJZ7KsYrtnBsDuVqBLEhrvJ6qcoON2Rwm9BcJgYsz0AwEpZ4l
+# o6nCpn5066cwMt3lVmsMJTaWkJInUwAAAAMBAAEAAAGAEuz77Hu9EEZyujLOdTnAW9afRv
+# XDOZA6pS7yWEufjw5CSlMLwisR83yww09t1QWyvhRqEyYmvOBecsXgaSUtnYfftWz44apy
+# /gQYvMVELGKaJAC/q7vjMpGyrxUPkyLMhckALU2KYgV+/rj/j6pBMeVlchmk3pikYrffUX
+# JDY990WVO194Dm0buLRzJvfMKYF2BcfF4TvarjOXWAxSuR8www050oJ8HdKahW7Cm5S0po
+# FRnNXFGMnLA62vN00vJW8V7j7vui9ukBbhjRWaJuY5rdG/UYmzAe4wvdIEnpk9xIn6JGCp
+# FRYTRn7lTh5+/QlQ6FXRP8Ir1vXZFnhKzl0K8Vqh2sf4M79MsIUGAqGxg9xdhjIa5dmgp8
+# N18IEDoNEVKUbKuKe/Z5yf8Z9tmexfH1YttjmXMOojBvUHIjRS5hdI9NxnPGRLY2kjAzcm
+# gV9Rv3vtdF/+zalk3fAVLeK8hXK+di/7XTvYpfJ2EZBWiNrTeagfNNGiYydsQy3zjZAAAA
+# wBNRak7UrqnIHMZn7pkCTgceb1MfByaFtlNzd+Obah54HYIQj5WdZTBAITReMZNt9S5NAR
+# M8sQB8UoZPaVSC3ppILIOfLhs6KYj6RrGdiYwyIhMPJ5kRWF8xGCLUX5CjwH2EOq7XhIWt
+# MwEFtd/gF2Du7HUNFPsZGnzJ3e7pDKDnE7w2khZ8CIpTFgD769uBYGAtk45QYTDo5JroVM
+# ZPDq08Gb/RhIgJLmIpMwyreVpLLLe8SwoMJJ+rihmnJZxO8gAAAMEA0lhiKezeTshht4xu
+# rWc0NxxD84a29gSGfTphDPOrlKSEYbkSXhjqCsAZHd8S8kMr3iF6poOk3IWSvFJ6mbd3ie
+# qdRTgXH9Thwk4KgpjUhNsQuYRHBbI59Mo+BxSI1B1qzmJSGdmCBL54wwzZmFKDQPQKPxiL
+# n0Mlc7GooiDMjT1tbuW/O1EL5EqTRqwgWPTKhBA6r4PnGF150hZRIMooZkD2zX6b1sGojk
+# QpvKkEykTwnKCzF5TXO8+wJ3qbcEo9AAAAwQD+Z0r68c2YMNpsmyj3ZKtZNPSvJNcLmyD/
+# lWoNJq3djJN4s2JbK8l5ARUdW3xSFEDI9yx/wpfsXoaqWnygP3PoFw2CM4i0EiJiyvrLFU
+# r3JLfDUFRy3EJ24RsqbigmEsgQOzTl3xfzeFPfxFoOhokSvTG88PQji1AYHz5kA7p6Zfaz
+# Ok11rJYIe7+e9B0lhku0AFwGyqlWQmS/MhIpnjHIk5tP4heHGSmzKQWJDbTskNWd6aq1G7
+# 6HWfDpX4HgoM8AAAALaG9sbWFuYkBhcmM=
+# -----END OPENSSH PRIVATE KEY-----
+#
diff --git a/doc/examples/cloud-config-ansible.txt b/doc/examples/cloud-config-ansible-pull.txt
index a3e7c273..62acc5a9 100644
--- a/doc/examples/cloud-config-ansible.txt
+++ b/doc/examples/cloud-config-ansible-pull.txt
@@ -1,5 +1,4 @@
#cloud-config
-version: v1
packages_update: true
packages_upgrade: true
@@ -7,10 +6,9 @@ packages_upgrade: true
# wish to manually install ansible to avoid multiple calls
# to your package manager
packages:
- - ansible
- git
ansible:
- install-method: pip
+ install_method: pip
pull:
url: "https://github.com/holmanb/vmboot.git"
- playbook-name: ubuntu.yml
+ playbook_name: ubuntu.yml
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index a66b0d75..13f42c49 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -1,6 +1,6 @@
#cloud-config
# Add groups to the system
-# The following example adds the ubuntu group with members 'root' and 'sys'
+# The following example adds the 'admingroup' group with members 'root' and 'sys'
# and the empty group cloud-users.
groups:
- admingroup: [root,sys]
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 7f4ded8c..15d788f3 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -143,17 +143,6 @@ bootcmd:
- echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
- [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
-# cloud_config_modules:
-# default:
-# cloud_config_modules:
-# - mounts
-# - ssh
-# - apt-update-upgrade
-# - puppet
-# - updates-check
-# - disable-ec2-metadata
-# - runcmd
-#
# This is an array of arrays or strings.
# if item is a string, then it is read as a module name
# if the item is an array it is of the form:
@@ -171,7 +160,6 @@ cloud_config_modules:
- grub-dpkg
- [ apt-update-upgrade, always ]
- puppet
- - updates-check
- disable-ec2-metadata
- runcmd
- byobu
diff --git a/doc/examples/include.txt b/doc/examples/include.txt
index 5bdc7991..77f0f79c 100644
--- a/doc/examples/include.txt
+++ b/doc/examples/include.txt
@@ -1,5 +1,5 @@
#include
-# entries are one url per line. comment lines beginning with '#' are allowed
-# urls are passed to urllib.urlopen, so the format must be supported there
-http://www.ubuntu.com/robots.txt
-http://www.w3schools.com/html/lastpage.htm
+# Entries are one URL per line. Comment lines beginning with '#' are allowed.
+# URLs are passed to urllib.urlopen, so the format must be supported there.
+https://raw.githubusercontent.com/canonical/cloud-init/403f70b930e3ce0f05b9b6f0e1a38d383d058b53/doc/examples/cloud-config-run-cmds.txt
+https://raw.githubusercontent.com/canonical/cloud-init/403f70b930e3ce0f05b9b6f0e1a38d383d058b53/doc/examples/cloud-config-boot-cmds.txt
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 1e9539a9..a549a444 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -1,8 +1,6 @@
import os
import sys
-import sphinx_rtd_theme
-
from cloudinit import version
# If extensions (or modules to document with autodoc) are in another directory,
@@ -31,8 +29,7 @@ needs_sphinx = "4.0"
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"m2r2",
- "sphinx_rtd_theme",
- "sphinx_panels",
+ "sphinx_design",
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.viewcode",
@@ -65,11 +62,15 @@ show_authors = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = "sphinx_rtd_theme"
+html_theme = "furo"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-html_logo = "static/logo.png"
+html_static_path = ["static"]
+html_theme_options = {
+ "light_logo": "logo.png",
+ "dark_logo": "logo-dark-mode.png",
+}
# Make sure the target is unique
autosectionlabel_prefix_document = True
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 1516a5cb..159113f4 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -4,26 +4,36 @@ cloud-init Documentation
########################
Cloud-init is the *industry standard* multi-distribution method for
-cross-platform cloud instance initialization. It is supported across all
-major public cloud providers, provisioning systems for private cloud
-infrastructure, and bare-metal installations.
+cross-platform cloud instance initialization.
-On instance boot, cloud-init will identify the cloud it is running on, read
-any provided metadata from the cloud, and initialize the system accordingly.
-This may involve setting up the network and storage devices, configuring SSH
-access keys, and setting up many other aspects of a system. Later,
-cloud-init will parse and process any optional user or vendor data that was
-passed to the instance.
+During boot, cloud-init identifies the cloud it is running on and initializes
+the system accordingly. Cloud instances will automatically be provisioned
+during first boot with networking, storage, ssh keys, packages and various
+other system aspects already configured.
-Getting help
-************
+Cloud-init provides the necessary glue between launching a cloud instance and
+connecting to it so that it works as expected.
+
+For cloud users, cloud-init provides no-install first-boot configuration
+management of a cloud instance. For cloud providers, it provides instance setup
+that can be integrated with your cloud.
+
+Project and community
+*********************
+Cloud-init is an open source project that warmly welcomes community
+projects, contributions, suggestions, fixes and constructive feedback.
+
+* `Code of conduct <https://ubuntu.com/community/code-of-conduct>`_
+* Ask questions in the ``#cloud-init`` IRC channel on Libera
+* `Mailing list <https://launchpad.net/~cloud-init>`_
+* `Contribute on GitHub <https://github.com/canonical/cloud-init/blob/main/CONTRIBUTING.rst>`_
+* `Release schedule <https://discourse.ubuntu.com/search?q=cloud-init%20release%20schedule%20order%3Alatest>`_
Having trouble? We would like to help!
+**************************************
- Check out the :ref:`lxd_tutorial` if you're new to cloud-init
- Try the :ref:`FAQ` for answers to some common questions
-- Ask a question in the ``#cloud-init`` IRC channel on Libera
-- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
.. toctree::
@@ -41,12 +51,18 @@ Having trouble? We would like to help!
.. toctree::
:hidden:
:titlesonly:
+ :caption: Explanation
+
+ topics/configuration.rst
+
+.. toctree::
+ :hidden:
+ :titlesonly:
:caption: User Data
topics/format.rst
topics/examples.rst
topics/events.rst
- topics/modules.rst
topics/merging.rst
.. toctree::
@@ -62,6 +78,14 @@ Having trouble? We would like to help!
.. toctree::
:hidden:
:titlesonly:
+ :caption: Reference
+
+ topics/base_config_reference.rst
+ topics/modules.rst
+
+.. toctree::
+ :hidden:
+ :titlesonly:
:caption: Development
topics/contributing.rst
@@ -75,5 +99,3 @@ Having trouble? We would like to help!
topics/docs.rst
topics/testing.rst
topics/integration_tests.rst
-
-.. vi: textwidth=79
diff --git a/doc/rtd/static/logo-dark-mode.png b/doc/rtd/static/logo-dark-mode.png
new file mode 100644
index 00000000..90d63411
--- /dev/null
+++ b/doc/rtd/static/logo-dark-mode.png
Binary files differ
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index aa978237..bf4de71d 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -60,6 +60,7 @@ environments in the public cloud:
- UpCloud
- Vultr
- Zadara Edge Cloud Platform
+- 3DS Outscale
Additionally, cloud-init is supported on these private clouds:
diff --git a/doc/rtd/topics/base_config_reference.rst b/doc/rtd/topics/base_config_reference.rst
new file mode 100644
index 00000000..97abcff5
--- /dev/null
+++ b/doc/rtd/topics/base_config_reference.rst
@@ -0,0 +1,363 @@
+.. _base_config_reference:
+
+Base Configuration
+******************
+
+.. warning::
+ This documentation is intended for custom image creators, such as
+ distros and cloud providers, not
+ end users. Modifying the base configuration should not be necessary for
+ end users and can result in a system that may be unreachable or
+ may no longer boot.
+
+Cloud-init base config is primarily defined in two places:
+
+* **/etc/cloud/cloud.cfg**
+* **/etc/cloud/cloud.cfg.d/*.cfg**
+
+See the :ref:`configuration sources explanation<configuration>` for more
+information on how these files get sourced and combined with other
+configuration.
+
+Generation
+==========
+
+``cloud.cfg`` isn't present in any of cloud-init's source files. The
+`configuration is templated`_ and customized for each
+distribution supported by cloud-init.
+
+Base Configuration Keys
+=======================
+
+Module Keys
+-----------
+
+Modules are grouped into the following keys:
+
+* **cloud_init_modules**: Modules run during
+ :ref:`network<topics/boot:network>` timeframe.
+* **cloud_config_modules**: Modules run during
+ :ref:`config<topics/boot:config>` timeframe.
+* **cloud_final_modules**: Modules run during
+ :ref:`final<topics/boot:final>` timeframe.
+
+Each ``modules`` definition contains an array of strings, where each string
+is the name of the module. Each name is taken directly from the
+module filename,
+with the ``cc_`` prefix and ``.py`` suffix removed, and with
+``-`` and ``_`` being interchangeable.
+
+Alternatively, in place of the module name, an array of
+``<name>, <frequency>[, <args>]`` args may be specified. See
+:ref:`the module creation guidelines<topics/module_creation:guidelines>` for
+more information on ``frequency`` and ``args``.
+
+.. note::
+ Most modules won't run at all if they're not triggered via a
+ respective user data key, so removing modules or changing the run
+ frequency is **not** a recommended way to reduce instance boot time.
+
+Examples
+^^^^^^^^
+
+To specify that only `cc_final_message.py`_ runs during the final
+timeframe:
+
+.. code-block:: yaml
+
+ cloud_final_modules:
+ - final_message
+
+To change the frequency from the default of ``ALWAYS`` to ``ONCE``:
+
+.. code-block:: yaml
+
+ cloud_final_modules:
+ - [final_message, once]
+
+To include default arguments to the module (that may be overridden by
+user data):
+
+.. code-block:: yaml
+
+ cloud_final_modules:
+ - [final_message, once, "my final message"]
+
+.. _datasource_base_config:
+
+Datasource Keys
+---------------
+
+Many datasources allow configuration of how they query the platform for
+metadata via the ``datasource`` key.
+This configuration is datasource dependent and can be found under
+each datasource's respective :ref:`documentation<datasources>`. It will
+generally take the form of:
+
+.. code-block:: yaml
+
+ datasource:
+ <datasource_name>:
+ ...
+
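+For example, a sketch of EC2 datasource configuration (the values shown are
+the documented defaults, included here only for illustration):
+
+.. code-block:: yaml
+
+   datasource:
+     Ec2:
+       metadata_urls: ['http://169.254.169.254']
+       max_wait: 120
+       timeout: 50
+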
+System Info Keys
+----------------
+These keys are used to set up cloud-init itself, the datasource,
+or the distro. Anything under the ``system_info`` key cannot be overridden by
+vendor data, user data, or any other handlers or transforms. In some cases,
+there may be a ``system_info`` key used for the distro, while the same
+key is used outside of ``system_info`` for a userdata module.
+Both keys will be processed independently.
+
+* **system_info**: Top-level key
+
+ - **paths**: Definitions of common paths used by cloud-init
+
+ + **cloud_dir**: Defaults to ``/var/lib/cloud``
+ + **templates_dir**: Defaults to ``/etc/cloud/templates``
+
+ - **distro**: Name of distro being used.
+ - **default_user**: Defines the default user for the system using the same
+ user configuration as :ref:`topics/modules:users and groups`. Note that
+ this CAN be overridden if a ``users`` configuration
+ is specified without a ``- default`` entry.
+ - **ntp_client**: The default ntp client for the distro. Takes the same
+ form as ``ntp_client`` defined in :ref:`topics/modules:ntp`.
+ - **package_mirrors**: Defines the package mirror info for apt.
+ - **ssh_svcname**: The ssh service name. For most distros this will be
+ either ``ssh`` or ``sshd``.
+ - **network**: Top-level key for distro-specific networking configuration
+
+ + **renderers**: Prioritized list of networking configurations to try
+ on this system. The first valid entry found will be used.
+ Options are:
+
+ * **eni** - For /etc/network/interfaces
+ * **network-manager**
+ * **netplan**
+ * **networkd** - For systemd-networkd
+ * **freebsd**
+ * **netbsd**
+ * **openbsd**
+
+ + **activators**: Prioritized list of networking tools to try to activate
+ network on this system. The first valid entry found will be used.
+ Options are:
+
+ * **eni** - For ``ifup``/``ifdown``
+ * **netplan** - For ``netplan generate``/``netplan apply``
+ * **network-manager** - For ``nmcli connection load``/
+ ``nmcli connection up``
+ * **networkd** - For ``ip link set up``/``ip link set down``
+
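+As a sketch, preferring netplan for both rendering and activation (the
+ordering below is illustrative, not a recommendation):
+
+.. code-block:: yaml
+
+   system_info:
+     network:
+       renderers: ['netplan', 'eni', 'networkd']
+       activators: ['netplan', 'eni', 'network-manager', 'networkd']
+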
+Logging Keys
+------------
+
+See :ref:`the logging explanation<logging>` for a comprehensive
+logging explanation. Note that cloud-init has a default logging
+definition that shouldn't need to be altered. It is defined on the
+instance at ``/etc/cloud/cloud.cfg.d/05_logging.cfg``.
+
+The logging keys used in the base configuration are as follows:
+
+**logcfg**: A standard python `fileConfig`_ formatted log configuration.
+This is the primary logging configuration key and will take precedence over
+the **log_cfgs** and **log_basic** keys.
+
+**log_cfgs**: A list of logging configs in `fileConfig`_ format to apply
+when running cloud-init. Note that **log_cfgs** is used in
+``/etc/cloud/cloud.cfg.d/05_logging.cfg``.
+
+**log_basic**: Boolean value to determine if cloud-init should apply a
+basic default logging configuration if none has been provided. Defaults
+to ``true``, but only takes effect if neither **logcfg** nor **log_cfgs**
+has been defined.
+
+**output**: If and how to redirect stdout/stderr. Defined in
+``/etc/cloud/cloud.cfg.d/05_logging.cfg`` and explained in
+:ref:`the logging explanation<logging_command_output>`.
+
+**syslog_fix_perms**: Takes a list of ``<owner:group>`` strings and will set
+the owner of **def_log_file** accordingly.
+
+**def_log_file**: Only used in conjunction with **syslog_fix_perms**.
+Specifies the filename to be used for setting permissions. Defaults
+to ``/var/log/cloud-init.log``.
+
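+As a sketch, a combined logging configuration (values are illustrative, not
+recommendations):
+
+.. code-block:: yaml
+
+   # Apply the built-in basic config if no fileConfig is provided
+   log_basic: true
+   # Redirect stdout/stderr of cloud-init stages and their commands
+   output: {all: '| tee -a /var/log/cloud-init-output.log'}
+   # Try each owner:group pair until one can be applied to def_log_file
+   syslog_fix_perms: [syslog:adm, root:adm]
+   def_log_file: /var/log/cloud-init.log
+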
+Other Keys
+----------
+
+**network**: The :ref:`network_config` to be applied to this instance.
+
+**datasource_pkg_list**: Prioritized list of python packages to search when
+finding a datasource. Automatically includes ``cloudinit.sources``.
+
+**datasource_list**: Prioritized list of datasources that cloud-init will
+attempt to find on boot. By default, this will be defined in
+``/etc/cloud/cloud.cfg.d``. There are two primary use cases for modifying
+the datasource_list:
+
+1. Remove known invalid datasources. This may avoid long timeouts attempting
+ to detect datasources on any system without a systemd-generator hook
+ that invokes ds-identify.
+2. Override default datasource ordering to discover a different datasource
+ type than would typically be prioritized.
+
+If **datasource_list** has only a single entry (or a single entry + ``None``),
+:ref:`cloud-init's generator script<topics/boot:generator>`
+will automatically assume and use this datasource without
+attempting detection.
+
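+For example, a single-datasource configuration that skips detection and
+falls back to no datasource at all:
+
+.. code-block:: yaml
+
+   datasource_list: [NoCloud, None]
+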
+**vendor_data**/**vendor_data2**: Allows the user to disable ``vendor_data``
+or ``vendor_data2`` along with providing a prefix for any executed scripts.
+
+Format is a dict with ``enabled`` and ``prefix`` keys:
+
+* **enabled**: Boolean indicating whether to enable or disable the vendor_data
+* **prefix**: A path to prepend to any vendor_data provided script
+
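+A sketch disabling ``vendor_data2`` and prefixing ``vendor_data`` scripts
+(the ``ltrace`` prefix is purely illustrative):
+
+.. code-block:: yaml
+
+   vendor_data:
+     enabled: true
+     prefix: /usr/bin/ltrace
+   vendor_data2:
+     enabled: false
+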
+Example
+=======
+
+On an Ubuntu system, ``/etc/cloud/cloud.cfg`` should look similar to:
+
+.. code-block:: yaml
+
+ # The top level settings are used as module and base configuration.
+ # A set of users which may be applied and/or used by various modules
+ # when a 'default' entry is found it will reference the 'default_user'
+ # from the distro configuration specified below
+ users:
+ - default
+
+
+ # If this is set, 'root' will not be able to ssh in and they
+ # will get a message to login instead as the default $user
+ disable_root: true
+
+ # This will cause the set+update hostname module to not operate (if true)
+ preserve_hostname: false
+
+ # If you use datasource_list array, keep array items in a single line.
+ # If you use multi line array, ds-identify script won't read array items.
+ # Example datasource config
+ # datasource:
+ # Ec2:
+ # metadata_urls: [ 'blah.com' ]
+ # timeout: 5 # (defaults to 50 seconds)
+ # max_wait: 10 # (defaults to 120 seconds)
+
+ # The modules that run in the 'init' stage
+ cloud_init_modules:
+ - migrator
+ - seed_random
+ - bootcmd
+ - write-files
+ - growpart
+ - resizefs
+ - disk_setup
+ - mounts
+ - set_hostname
+ - update_hostname
+ - update_etc_hosts
+ - ca-certs
+ - rsyslog
+ - users-groups
+ - ssh
+
+ # The modules that run in the 'config' stage
+ cloud_config_modules:
+ - snap
+ - ssh-import-id
+ - keyboard
+ - locale
+ - set-passwords
+ - grub-dpkg
+ - apt-pipelining
+ - apt-configure
+ - ubuntu-advantage
+ - ntp
+ - timezone
+ - disable-ec2-metadata
+ - runcmd
+ - byobu
+
+ # The modules that run in the 'final' stage
+ cloud_final_modules:
+ - package-update-upgrade-install
+ - fan
+ - landscape
+ - lxd
+ - ubuntu-drivers
+ - write-files-deferred
+ - puppet
+ - chef
+ - mcollective
+ - salt-minion
+ - reset_rmc
+ - refresh_rmc_and_interface
+ - rightscale_userdata
+ - scripts-vendor
+ - scripts-per-once
+ - scripts-per-boot
+ - scripts-per-instance
+ - scripts-user
+ - ssh-authkey-fingerprints
+ - keys-to-console
+ - install-hotplug
+ - phone-home
+ - final-message
+ - power-state-change
+
+ # System and/or distro specific settings
+ # (not accessible to handlers/transforms)
+ system_info:
+ # This will affect which distro class gets used
+ distro: ubuntu
+ # Default user name + that default users groups (if added/used)
+ default_user:
+ name: ubuntu
+ lock_passwd: True
+ gecos: Ubuntu
+ groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/bash
+ network:
+ renderers: ['netplan', 'eni', 'sysconfig']
+ # Automatically discover the best ntp_client
+ ntp_client: auto
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+ package_mirrors:
+ - arches: [i386, amd64]
+ failsafe:
+ primary: http://archive.ubuntu.com/ubuntu
+ security: http://security.ubuntu.com/ubuntu
+ search:
+ primary:
+ - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/
+ - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/
+ - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/
+ security: []
+ - arches: [arm64, armel, armhf]
+ failsafe:
+ primary: http://ports.ubuntu.com/ubuntu-ports
+ security: http://ports.ubuntu.com/ubuntu-ports
+ search:
+ primary:
+ - http://%(ec2_region)s.ec2.ports.ubuntu.com/ubuntu-ports/
+ - http://%(availability_zone)s.clouds.ports.ubuntu.com/ubuntu-ports/
+ - http://%(region)s.clouds.ports.ubuntu.com/ubuntu-ports/
+ security: []
+ - arches: [default]
+ failsafe:
+ primary: http://ports.ubuntu.com/ubuntu-ports
+ security: http://ports.ubuntu.com/ubuntu-ports
+ ssh_svcname: ssh
+
+
+.. _configuration is templated: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl
+.. _cc_final_message.py: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/cc_final_message.py
+.. _fileConfig: https://docs.python.org/3/library/logging.config.html#logging-config-fileformat
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index db6621a7..ba9bd40b 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -153,7 +153,7 @@ Things that run here include:
* user-defined scripts (i.e. shell scripts passed as user-data)
For scripts external to cloud-init looking to wait until cloud-init is
-finished, the ``cloud-init status`` subcommand can help block external
+finished, the ``cloud-init status --wait`` subcommand can help block external
scripts until cloud-init is done without having to write your own systemd
units dependency chains. See :ref:`cli_status` for more info.
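+
+For example, a provisioning script could gate on completion with a one-liner
+(a sketch):
+
+.. code-block:: shell-session
+
+   $ cloud-init status --wait && echo "cloud-init finished"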
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index 1a5f5e2d..bd7cac62 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -277,8 +277,9 @@ single
Attempt to run a single named cloud config module.
* ``--name``: the cloud-config module name to run
-* ``--frequency``: optionally override the declared module frequency
- with one of (always|once-per-instance|once)
+* ``--frequency``: module frequency for this run.
+ One of (always|once-per-instance|once)
+* ``--report``: enable reporting
The following example re-runs the cc_set_hostname module ignoring the module
default frequency of once-per-instance:
@@ -303,6 +304,8 @@ non-zero if an error is detected in cloud-init.
* ``--long``: detailed status information
* ``--wait``: block until cloud-init completes
+* ``--format [yaml|json|tabular]``: detailed output in tabular form or as
+  machine-readable JSON or YAML
Below are examples of output when cloud-init is running, showing status and
the currently running modules, as well as when it is done.
@@ -323,8 +326,19 @@ the currently running modules, as well as when it is done.
$ cloud-init status --long
status: done
- time: Wed, 17 Jan 2018 20:41:59 +0000
+ boot_status_code: enabled-by-generator
+ last_update: Tue, 16 Aug 2022 19:12:58 +0000
detail:
DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]
+ $ cloud-init status --format=json
+ {
+ "boot_status_code": "enabled-by-generator",
+ "datasource": "nocloud",
+ "detail": "DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]",
+ "errors": [],
+ "last_update": "Tue, 16 Aug 2022 19:12:58 +0000",
+ "status": "done"
+ }
+
.. _More details on machine-id: https://www.freedesktop.org/software/systemd/man/machine-id.html
diff --git a/doc/rtd/topics/configuration.rst b/doc/rtd/topics/configuration.rst
new file mode 100644
index 00000000..14716f29
--- /dev/null
+++ b/doc/rtd/topics/configuration.rst
@@ -0,0 +1,79 @@
+.. _configuration:
+
+Configuration Sources
+*********************
+
+Internally, cloud-init builds a single configuration that is then referenced
+throughout the life of cloud-init. The configuration is built from multiple
+sources such that if a key is defined in multiple sources, the higher priority
+source overwrites the lower priority source.
+
+Base Configuration
+==================
+
+From lowest priority to highest, configuration sources are:
+
+* **Hardcoded config**: Config_ that lives within the source of cloud-init
+ and cannot be changed.
+* **Configuration directory**: Anything defined in ``/etc/cloud/cloud.cfg`` and
+ ``/etc/cloud/cloud.cfg.d``.
+* **Runtime config**: Anything defined in ``/run/cloud-init/cloud.cfg``.
+* **Kernel command line**: On the kernel command line, anything found between
+ ``cc:`` and ``end_cc`` will be interpreted as cloud-config user data.
+
+These four sources make up the base configuration.
+
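+As an illustrative sketch, appending the following to the kernel command
+line would set ``ssh_pwauth`` at the highest base-config priority:
+
+.. code-block:: text
+
+   root=/dev/sda1 ro cc: ssh_pwauth: true end_cc
+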
+Vendor and User Data
+====================
+Added to the base configuration are:
+
+* **Vendor data**: :ref:`Data<vendordata>` provided by the datasource
+* **User data**: :ref:`Data<user_data_formats>` also provided by
+ the datasource
+
+These get fetched from the datasource and are defined at instance launch.
+
+.. note::
+ While much of what is defined in the base configuration can be overridden by
+ vendor data and user data, base configuration sources do not conform to
+ :ref:`#cloud-config<topics/format:Cloud Config Data>`
+
+Network Configuration
+=====================
+Network configuration happens independently from other cloud-init
+configuration. See :ref:`network configuration documentation<default_behavior>`
+for more information.
+
+Specifying Configuration
+==========================
+
+End users
+---------
+Pass :ref:`user data<user_data_formats>` to the cloud provider.
+Every platform supporting cloud-init will provide a method of supplying
+user data. If you're unsure how to do this, reference the documentation
+provided by the cloud platform you're on. Additionally, there may be
+related cloud-init documentation in the :ref:`datasource<datasources>`
+section.
+
+Once an instance has been initialized, the user data may not be edited.
+It is sourced directly from the cloud, so even if you find a local file
+that contains user data, it will likely be overwritten next boot.
+
+Distro Providers
+----------------
+Modify the base config. This often involves submitting a PR to modify
+the base `cloud.cfg template`_, which is used to customize
+``/etc/cloud/cloud.cfg`` per distro. Additionally, a file can be added to
+``/etc/cloud/cloud.cfg.d`` to override a piece of the base configuration.
+
+Cloud Providers
+---------------
+Pass vendor data. This is the preferred method for clouds to provide
+their own customization. In some cases, it may make sense to modify the
+base config in the same manner as distro providers on cloud-supported
+images.
+
+
+.. _Config: https://github.com/canonical/cloud-init/blob/b861ea8a5e1fd0eb33096f60f54eeff42d80d3bd/cloudinit/settings.py#L22
+.. _cloud.cfg template: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index fc08bb7d..0867564d 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -52,6 +52,7 @@ The following is a list of documents for each supported datasource:
datasources/vmware.rst
datasources/vultr.rst
datasources/zstack.rst
+ datasources/nwcs.rst
Creation
========
diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst
index 9074edc6..2d2e4544 100644
--- a/doc/rtd/topics/datasources/exoscale.rst
+++ b/doc/rtd/topics/datasources/exoscale.rst
@@ -38,8 +38,9 @@ Configuration
Users of this datasource are discouraged from changing the default settings
unless instructed to by Exoscale support.
-The following settings are available and can be set for the datasource in
-system configuration (in `/etc/cloud/cloud.cfg.d/`).
+The following settings are available and can be set for the
+:ref:`datasource base configuration<datasource_base_config>`
+(in `/etc/cloud/cloud.cfg.d/`).
The settings available are:
diff --git a/doc/rtd/topics/datasources/lxd.rst b/doc/rtd/topics/datasources/lxd.rst
index 99b42cfa..3b523d50 100644
--- a/doc/rtd/topics/datasources/lxd.rst
+++ b/doc/rtd/topics/datasources/lxd.rst
@@ -75,3 +75,36 @@ of static NoCloud seed files.
.. _LXD socket device: https://linuxcontainers.org/lxd/docs/master/dev-lxd
.. vi: textwidth=79
+
+Hotplug
+-------
+
+Network hotplug functionality is supported for the LXD datasource as described
+in the :ref:`events` documentation. As hotplug functionality relies on the
+cloud provided network metadata, the LXD datasource will only meaningfully
+react to a hotplug event if the configuration necessary to respond to
+the change has been provided to LXD. Practically, this means that
+even with hotplug enabled, **the default behavior when adding a new virtual
+NIC will result in no change**.
+
+To update the configuration to be used by hotplug, first pass the network
+configuration via the ``cloud-init.network-config`` key (or
+``user.network-config`` on older LXD versions).
+
+For example, given an LXD instance named ``my-lxd`` with hotplug enabled and
+an LXD bridge named ``my-bridge``, the following will allow for additional
+DHCP configuration of ``eth1``:
+
+.. code-block:: shell-session
+
+ $ cat /tmp/cloud-network-config.yaml
+ version: 2
+ ethernets:
+ eth0:
+ dhcp4: true
+ eth1:
+ dhcp4: true
+
+ $ lxc config set my-lxd cloud-init.network-config="$(cat /tmp/cloud-network-config.yaml)"
+ $ lxc config device add my-lxd eth1 nic name=eth1 nictype=bridged parent=my-bridge
+ Device eth1 added to my-lxd
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 6080d288..aedc0f58 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -68,8 +68,8 @@ sufficient disk by following the example below.
## 1) create user-data and meta-data files that will be used
## to modify image on first boot
- $ echo "instance-id: iid-local01\nlocal-hostname: cloudimg" > meta-data
- $ echo "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
+ $ echo -e "instance-id: iid-local01\nlocal-hostname: cloudimg" > meta-data
+ $ echo -e "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
## 2a) create a disk to attach with some user-data and meta-data
$ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data
diff --git a/doc/rtd/topics/datasources/nwcs.rst b/doc/rtd/topics/datasources/nwcs.rst
new file mode 100644
index 00000000..2b6543d3
--- /dev/null
+++ b/doc/rtd/topics/datasources/nwcs.rst
@@ -0,0 +1,30 @@
+.. _datasource_nwcs:
+
+NWCS
+=====
+
+The NWCS datasource retrieves basic configuration values from the locally
+accessible metadata service. All data is served over HTTP from the address
+169.254.169.254.
+
+Configuration
+-------------
+
+The NWCS datasource can be configured as follows::
+
+ datasource:
+ NWCS:
+ url: 'http://169.254.169.254'
+ retries: 3
+ timeout: 2
+ wait: 2
+
+- *url*: The URL from which to acquire the metadata configuration
+- *retries*: Determines the number of times to attempt to connect to the
+  metadata service
+- *timeout*: Determines the timeout in seconds to wait for a response from the
+  metadata service
+- *wait*: Determines the time in seconds to wait before retrying after a
+  failed attempt
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index f523c142..7818507a 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -19,9 +19,9 @@ checks the following environment attributes as a potential OpenStack platform:
* **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
* **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
- * **DMI chassis_asset_tag** is *OpenTelekomCloud*, *SAP CCloud VM*,
- *OpenStack Nova* (since 19.2) or *OpenStack Compute* (since 19.2)
-
+ * **DMI chassis_asset_tag** is *HUAWEICLOUD*, *OpenTelekomCloud*,
+ *SAP CCloud VM*, *OpenStack Nova* (since 19.2) or
+ *OpenStack Compute* (since 19.2)
Configuration
-------------
diff --git a/doc/rtd/topics/datasources/smartos.rst b/doc/rtd/topics/datasources/smartos.rst
index 55604ffb..6fe45c73 100644
--- a/doc/rtd/topics/datasources/smartos.rst
+++ b/doc/rtd/topics/datasources/smartos.rst
@@ -125,7 +125,9 @@ are provided by SmartOS:
* user-data
* user-script
-This list can be changed through system config of variable 'no_base64_decode'.
+This list can be changed through the
+:ref:`datasource base configuration<datasource_base_config>` variable
+'no_base64_decode'.
This means that user-script and user-data as well as other values can be
base64 encoded. Since Cloud-init can only guess as to whether or not something
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index 353e22d8..3f260947 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -41,10 +41,24 @@ Install and run `chef`_ recipes
:language: yaml
:linenos:
-Install and run `ansible`_
-==========================
+Install and run `ansible-pull`
+===============================
+
+.. literalinclude:: ../../examples/cloud-config-ansible-pull.txt
+ :language: yaml
+ :linenos:
+
+Configure Instance to be Managed by Ansible
+===========================================
+
+.. literalinclude:: ../../examples/cloud-config-ansible-managed.txt
+ :language: yaml
+ :linenos:
+
+Configure Instance to be an Ansible Controller
+==============================================
-.. literalinclude:: ../../examples/cloud-config-ansible.txt
+.. literalinclude:: ../../examples/cloud-config-ansible-controller.txt
:language: yaml
:linenos:
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 2815f492..8dae49e9 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -47,6 +47,9 @@ Cloud-init config is provided in two places:
These files can define the modules that run during instance initialization,
the datasources to evaluate on boot, and other settings.
+See the :ref:`configuration sources explanation<configuration>` and
+:ref:`configuration reference<base_config_reference>` for more information.
+
Where are the data files?
=========================
@@ -160,7 +163,7 @@ To check if cloud-init is running still, run:
$ cloud-init status
-To wait for clous-init to complete, run:
+To wait for cloud-init to complete, run:
.. code-block:: shell-session
@@ -171,7 +174,7 @@ is not exhaustive, but attempts to enumerate potential causes:
External reasons:
-----------------
-- failed dependant services in the boot
+- failed dependent services in the boot
- bugs in the kernel or drivers
- bugs in external userspace tools that are called by cloud-init
@@ -182,6 +185,21 @@ Internal reasons:
- nonstandard configurations that disable timeouts or set extremely high
values ("never" is used in a loose sense here)
+Failing to complete on systemd:
+-------------------------------
+
+Cloud-init consists of multiple systemd services. If a service
+that cloud-init depends on stalls, cloud-init will not continue.
+When reporting a bug about cloud-init failing to complete on
+systemd, please include the output of the following commands.
+
+.. code-block:: shell-session
+
+ $ systemd-analyze critical-chain cloud-init.target
+ $ journalctl --boot=-1
+ $ systemctl --failed
+
+
How can I make a module run on every boot?
==========================================
Modules have a default frequency that can be overridden. This is done
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index cf2f7e10..575dbf1b 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -10,68 +10,166 @@ Instance Metadata
kernel-cmdline.rst
-What is instance data?
+What is instance-data?
========================
-Instance data is the collection of all configuration data that cloud-init
-processes to configure the instance. This configuration typically
-comes from any number of sources:
-
-* cloud-provided metadata services (aka metadata)
-* custom config-drive attached to the instance
-* cloud-config seed files in the booted cloud image or distribution
-* vendordata provided from files or cloud metadata services
-* userdata provided at instance creation
-* :ref:`kernel_cmdline`
-
-Each cloud provider presents unique configuration metadata in different
-formats to the instance. Cloud-init provides a cache of any crawled metadata
-as well as a versioned set of standardized instance data keys which it makes
-available on all platforms.
-
-Cloud-init produces a simple json object in
-``/run/cloud-init/instance-data.json`` which represents standardized and
-versioned representation of the metadata it consumes during initial boot. The
-intent is to provide the following benefits to users or scripts on any system
-deployed with cloud-init:
-
-* simple static object to query to obtain a instance's metadata
-* speed: avoid costly network transactions for metadata that is already cached
- on the filesystem
-* reduce need to recrawl metadata services for static metadata that is already
- cached
-* leverage cloud-init's best practices for crawling cloud-metadata services
-* avoid rolling unique metadata crawlers on each cloud platform to get
- metadata configuration values
-
-Cloud-init stores any instance data processed in the following files:
+Each cloud provider presents unique
+configuration metadata to a launched cloud instance. Cloud-init
+crawls this metadata and then caches and exposes this information
+as a standardized and versioned JSON object known as instance-data.
+This instance-data may then be queried, or used later by cloud-init in
+templated configuration and scripts.
+An example of a small subset of instance-data on a launched EC2 instance:
+
+.. code-block:: json
+
+ {
+ "v1": {
+ "cloud_name": "aws",
+ "distro": "ubuntu",
+ "distro_release": "jammy",
+ "distro_version": "22.04",
+ "instance_id": "i-06b5687b4d7b8595d",
+ "machine": "x86_64",
+ "platform": "ec2",
+ "python_version": "3.10.4",
+ "region": "us-east-2",
+ "variant": "ubuntu"
+ }
+ }
+
+
+Discovery
+=========
+
+One way to easily explore what instance-data variables are available on
+your machine is to use the :ref:`cloud-init query<cli_query>` tool.
+Warnings or exceptions will be raised on invalid instance-data keys,
+paths, or syntax.
+
+The **query** command also publishes ``userdata`` and ``vendordata`` keys to
+the root user which will contain the decoded user and vendor data provided to
+this instance. Non-root users referencing userdata or vendordata keys will
+see only redacted values.
+
+.. note::
+ To save time designing a user-data template for a specific cloud's
+ instance-data.json, use the 'render' cloud-init command on an
+ instance booted on your favorite cloud. See :ref:`cli_devel` for more
+ information.
+
+Using instance-data
+===================
+
+Instance-data can be used in:
+
+* :ref:`User-data scripts<topics/format:User-Data Script>`
+* :ref:`Cloud-config data<topics/format:Cloud Config Data>`
+* :ref:`Base configuration<configuration>`
+* Command line interface via **cloud-init query** or
+ **cloud-init devel render**
+
+The aforementioned configuration sources support jinja template rendering.
+When the first line of the provided configuration begins with
+**## template: jinja**, cloud-init will use jinja to render that file.
+Any instance-data variables are surfaced as jinja template
+variables.
+
+.. note::
+ Trying to reference jinja variables that don't exist in instance-data
+ will result in warnings in ``/var/log/cloud-init.log`` and the following
+ string in your rendered user-data:
+ ``CI_MISSING_JINJA_VAR/<your_varname>``.
+
+Sensitive data such as user passwords may be contained in
+instance-data. Cloud-init separates this sensitive data such that
+it is only readable by root. If a non-root user attempts
+to read sensitive instance-data, they will receive redacted data or the
+same warnings and text that occur if a variable does not exist.
+
+Example Usage
+-------------
+
+Cloud config with instance-data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: yaml
+
+ ## template: jinja
+ #cloud-config
+ runcmd:
+ - echo 'EC2 public hostname allocated to instance: {{
+ ds.meta_data.public_hostname }}' > /tmp/instance_metadata
+ - echo 'EC2 availability zone: {{ v1.availability_zone }}' >>
+ /tmp/instance_metadata
+ - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}",
+ "availability-zone": "{{ v1.availability_zone }}"}'
+ https://example.com
+
+User-data script with instance-data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: jinja
+
+ ## template: jinja
+ #!/bin/bash
+ {% if v1.region == 'us-east-2' -%}
+   echo 'Installing custom proxies for {{ v1.region }}'
+ sudo apt-get install my-xtra-fast-stack
+ {%- endif %}
+ ...
+
+CLI discovery of instance-data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: shell-session
+
+ # List all instance-data keys and values as root user
+ % sudo cloud-init query --all
+ {...}
+
+ # List all top-level instance-data keys available
+ % cloud-init query --list-keys
+
+ # Introspect nested keys on an object
+ % cloud-init query -f "{{ds.keys()}}"
+ dict_keys(['meta_data', '_doc'])
+
+ # Failure to reference valid dot-delimited key path on a known top-level key
+ % cloud-init query v1.not_here
+ ERROR: instance-data 'v1' has no 'not_here'
+
+ # Test expected value using valid instance-data key path
+ % cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}"
+ My AMI: ami-0fecc35d3c8ba8d60
+
+ # The --format command renders jinja templates, this can also be used
+ # to develop and test jinja template constructs
+ % cat > test-templating.yaml <<EOF
+ {% for val in ds.meta_data.keys() %}
+ - {{ val }}
+ {% endfor %}
+ EOF
+ % cloud-init query --format="$( cat test-templating.yaml )"
+ - instance_id
+ - dsmode
+ - local_hostname
+
+Reference
+=========
+
+Storage Locations
+-----------------
* ``/run/cloud-init/instance-data.json``: world-readable json containing
standardized keys, sensitive keys redacted
* ``/run/cloud-init/instance-data-sensitive.json``: root-readable unredacted
json blob
-* ``/var/lib/cloud/instance/user-data.txt``: root-readable sensitive raw
- userdata
-* ``/var/lib/cloud/instance/vendor-data.txt``: root-readable sensitive raw
- vendordata
-
-Cloud-init redacts any security sensitive content from instance-data.json,
-stores ``/run/cloud-init/instance-data.json`` as a world-readable json file.
-Because user-data and vendor-data can contain passwords both of these files
-are readonly for *root* as well. The *root* user can also read
-``/run/cloud-init/instance-data-sensitive.json`` which is all instance data
-from instance-data.json as well as unredacted sensitive content.
-
Format of instance-data.json
-============================
+----------------------------
-The instance-data.json and instance-data-sensitive.json files are well-formed
-JSON and record the set of keys and values for any metadata processed by
-cloud-init. Cloud-init standardizes the format for this content so that it
-can be generalized across different cloud platforms.
-
-There are three basic top-level keys:
+Top-level keys:
* **base64_encoded_keys**: A list of forward-slash delimited key paths into
the instance-data.json object whose value is base64encoded for json
@@ -83,10 +181,10 @@ There are three basic top-level keys:
'security sensitive'. Only the keys listed here will be redacted from
instance-data.json for non-root users.
-* **merged_cfg**: Merged cloud-init 'system_config' from `/etc/cloud/cloud.cfg`
- and `/etc/cloud/cloud-cfg.d`. Values under this key could contain sensitive
- information such as passwords, so it is included in the **sensitive-keys**
- list which is only readable by root.
+* **merged_cfg**: Merged cloud-init :ref:`base_config_reference` from
+ `/etc/cloud/cloud.cfg` and `/etc/cloud/cloud-cfg.d`. Values under this key
+ could contain sensitive information such as passwords, so it is included in
+ the **sensitive-keys** list which is only readable by root.
* **ds**: Datasource-specific metadata crawled for the specific cloud
platform. It should closely represent the structure of the cloud metadata
@@ -103,6 +201,20 @@ There are three basic top-level keys:
and format and will be carried forward even if cloud-init introduces a new
version of standardized keys with **v2**.
+To cut down on keystrokes on the command line, cloud-init also provides
+top-level key aliases for any standardized ``v#`` keys present. The preceding
+``v1`` is not required of ``v1.var_name``. These aliases represent the
+value of the highest versioned standard key. For example, the ``cloud_name``
+value will be that of ``v2.cloud_name`` if both ``v1`` and ``v2`` keys are
+present in instance-data.json.
+
+cloud-init also provides jinja-safe key aliases for any instance-data
+keys which contain jinja operator characters such as +, -, ., /, etc. Any
+jinja operator will be replaced with underscores in the jinja-safe key
+alias. This allows cloud-init templates to use aliased variable
+references with jinja's dot-notation, such as
+``{{ ds.v1_0.my_safe_key }}`` instead of ``{{ ds["v1.0"]["my/safe-key"] }}``.
+
The standardized keys present:
v1._beta_keys
@@ -263,7 +375,7 @@ EC2 instance:
"availability_zone": "us-east-1b",
"base64_encoded_keys": [],
"merged_cfg": {
- "_doc": "Merged cloud-init system config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/",
+ "_doc": "Merged cloud-init base config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/",
"_log": [
"[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n",
"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n",
@@ -510,155 +622,3 @@ EC2 instance:
"variant": "ubuntu",
"vendordata": ""
}
-
-
-Using instance-data
-===================
-
-As of cloud-init v. 18.4, any instance-data can be used in:
-
-* User-data scripts
-* Cloud config data
-* Command line interface via **cloud-init query** or
- **cloud-init devel render**
-
-This means that any variable present in
-``/run/cloud-init/instance-data-sensitive.json`` can be used,
-unless a non-root user is using the command line interface.
-In the non-root user case,
-``/run/cloud-init/instance-data.json`` will be used instead.
-
-Many clouds allow users to provide user-data to an instance at
-the time the instance is launched. Cloud-init supports a number of
-:ref:`user_data_formats`.
-
-Both user-data scripts and **#cloud-config** data support jinja template
-rendering.
-When the first line of the provided user-data begins with,
-**## template: jinja** cloud-init will use jinja to render that file.
-Any instance-data-sensitive.json variables are surfaced as jinja template
-variables because cloud-config modules are run as 'root' user.
-
-.. note::
- cloud-init also provides jinja-safe key aliases for any instance-data.json
- keys which contain jinja operator characters such as +, -, ., /, etc. Any
- jinja operator will be replaced with underscores in the jinja-safe key
- alias. This allows for cloud-init templates to use aliased variable
- references which allow for jinja's dot-notation reference such as
- ``{{ ds.v1_0.my_safe_key }}`` instead of ``{{ ds["v1.0"]["my/safe-key"] }}``.
-
-Below are some other examples of using jinja templates in user-data:
-
-* Cloud config calling home with the ec2 public hostname and availability-zone
-
-.. code-block:: yaml
-
- ## template: jinja
- #cloud-config
- runcmd:
- - echo 'EC2 public hostname allocated to instance: {{
- ds.meta_data.public_hostname }}' > /tmp/instance_metadata
- - echo 'EC2 availability zone: {{ v1.availability_zone }}' >>
- /tmp/instance_metadata
- - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}",
- "availability-zone": "{{ v1.availability_zone }}"}'
- https://example.com
-
-* Custom user-data script performing different operations based on region
-
-.. code-block:: jinja
-
- ## template: jinja
- #!/bin/bash
- {% if v1.region == 'us-east-2' -%}
- echo 'Installing custom proxies for {{ v1.region }}
- sudo apt-get install my-xtra-fast-stack
- {%- endif %}
- ...
-
-One way to easily explore what Jinja variables are available on your machine
-is to use the ``cloud-init query --format`` (-f) commandline option which will
-render any Jinja syntax you use. Warnings or exceptions will be raised on
-invalid instance-data keys, paths or invalid syntax.
-
-.. code-block:: shell-session
-
- # List all instance-data keys and values as root user
- % sudo cloud-init query --all
- {...}
-
- # Introspect nested keys on an object
- % cloud-init query -f "{{ds.keys()}}"
- dict_keys(['meta_data', '_doc'])
-
- # Test your Jinja rendering syntax on the command-line directly
-
- # Failure to reference valid top-level instance-data key
- % cloud-init query -f "{{invalid.instance-data.key}}"
- WARNING: Ignoring jinja template for query commandline: 'invalid' is undefined
-
- # Failure to reference valid dot-delimited key path on a known top-level key
- % cloud-init query -f "{{v1.not_here}}"
- WARNING: Could not render jinja template variables in file 'query commandline': 'not_here'
- CI_MISSING_JINJA_VAR/not_here
-
- # Test expected value using valid instance-data key path
- % cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}"
- My AMI: ami-0fecc35d3c8ba8d60
-
-.. note::
- Trying to reference jinja variables that don't exist in
- instance-data will result in warnings in ``/var/log/cloud-init.log``
- and the following string in your rendered user-data:
- ``CI_MISSING_JINJA_VAR/<your_varname>``.
-
-Cloud-init also surfaces a command line tool **cloud-init query** which can
-assist developers or scripts with obtaining instance metadata easily. See
-:ref:`cli_query` for more information.
-
-To cut down on keystrokes on the command line, cloud-init also provides
-top-level key aliases for any standardized ``v#`` keys present. The ``v1``
-prefix is not required for ``v1.var_name``. These aliases represent the
-value of the highest versioned standard key. For example, the ``cloud_name``
-value will be ``v2.cloud_name`` if both ``v1`` and ``v2`` keys are present in
-instance-data.json.
-The **query** command also publishes ``userdata`` and ``vendordata`` keys to
-the root user which will contain the decoded user and vendor data provided to
-this instance. Non-root users referencing userdata or vendordata keys will
-see only redacted values.
-
-.. code-block:: shell-session
-
- # List all top-level instance-data keys available
- % cloud-init query --list-keys
-
- # Find your EC2 ami-id
- % cloud-init query ds.meta_data.ami_id
-
- # Format your cloud_name and region using jinja template syntax
- % cloud-init query --format 'cloud: {{ v1.cloud_name }} myregion: {{
- % v1.region }}'
-
- # Locally test that the template user-data provided to the VM was rendered
- # as intended.
- % cloud-init query --format "$(sudo cloud-init query userdata)"
-
- # The --format option renders jinja templates; this can also be used
- # to develop and test jinja template constructs
- % cat > test-templating.yaml <<EOF
- {% for val in ds.meta_data.keys() %}
- - {{ val }}
- {% endfor %}
- EOF
- % cloud-init query --format="$( cat test-templating.yaml )"
- - instance_id
- - dsmode
- - local_hostname
-
-.. note::
- To save time designing a user-data template for a specific cloud's
- instance-data.json, use the 'render' cloud-init command on an
- instance booted on your favorite cloud. See :ref:`cli_devel` for more
- information.
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/logging.rst b/doc/rtd/topics/logging.rst
index f72b77c1..4d0a14ca 100644
--- a/doc/rtd/topics/logging.rst
+++ b/doc/rtd/topics/logging.rst
@@ -1,3 +1,5 @@
+.. _logging:
+
*******
Logging
*******
@@ -98,8 +100,10 @@ the default format string ``%(message)s``::
For additional information about configuring python's logging module, please
see the documentation for `python logging config`_.
+.. _logging_command_output:
+
Command Output
---------------
+==============
Cloud-init can redirect its stdout and stderr based on config given under the
``output`` config key. The output of any commands run by cloud-init and any
user or vendor scripts provided will also be included here. The ``output`` key
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
index 204719eb..a1422fe3 100644
--- a/doc/rtd/topics/merging.rst
+++ b/doc/rtd/topics/merging.rst
@@ -114,7 +114,7 @@ An example of one of these merging classes is the following:
.. code-block:: python
- class Merger(object):
+ class Merger:
def __init__(self, merger, opts):
self._merger = merger
self._overwrite = 'overwrite' in opts
diff --git a/doc/rtd/topics/module_creation.rst b/doc/rtd/topics/module_creation.rst
index 12cfdb00..56cadec4 100644
--- a/doc/rtd/topics/module_creation.rst
+++ b/doc/rtd/topics/module_creation.rst
@@ -17,6 +17,7 @@ Example
from logging import Logger
from cloudinit.cloud import Cloud
+ from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE
@@ -44,7 +45,9 @@ Example
__doc__ = get_meta_doc(meta)
- def handle(name: str, cfg: dict, cloud: Cloud, log: Logger, args: list):
+ def handle(
+ name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
+ ) -> None:
log.debug(f"Hi from module {name}")
@@ -63,7 +66,9 @@ Guidelines
instance types.
* ``log``: A logger object that can be used to log messages.
* ``args``: An argument list. This is usually empty and is only populated
- if the module is called independently from the command line.
+ if the module is called independently from the command line or if the
+ module definition in ``/etc/cloud/cloud.cfg[.d]`` has been modified
+ to pass arguments to this module.
* If your module introduces any new cloud-config keys, you must provide a
schema definition in `cloud-init-schema.json`_.
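For orientation, a minimal module skeleton using the updated ``handle``
signature from the hunk above could look like the sketch below. It mirrors
the example in module_creation.rst; the module name and meta field values
are illustrative only.

from logging import Logger

from cloudinit.cloud import Cloud
from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS
from cloudinit.settings import PER_INSTANCE

# Illustrative metadata; real modules describe their cloud-config keys here.
meta: MetaSchema = {
    "id": "cc_example",
    "name": "Example Module",
    "title": "Shows how to create a module",
    "description": "This module does very little",
    "distros": [ALL_DISTROS],
    "frequency": PER_INSTANCE,
    "examples": ["example_key: example_value"],
}

__doc__ = get_meta_doc(meta)


def handle(
    name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
) -> None:
    # args is normally empty unless the module entry in
    # /etc/cloud/cloud.cfg[.d] was modified to pass arguments.
    log.debug(f"Hi from module {name}")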
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index cbe0f5d7..b0ad83e4 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -3,7 +3,6 @@
Module Reference
****************
-.. contents:: Table of Contents
.. automodule:: cloudinit.config.cc_ansible
.. automodule:: cloudinit.config.cc_apk_configure
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 68a9cefa..a9dd31af 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -10,7 +10,8 @@ creation (bonds, bridges, vlans) routes and DNS configuration.
Required elements of a Network Config Version 1 are ``config`` and
``version``.
-Cloud-init will read this format from system config.
+Cloud-init will read this format from :ref:`base_config_reference`.
+
For example the following could be present in
``/etc/cloud/cloud.cfg.d/custom-networking.cfg``:
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index 3080c6d4..53274417 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -8,6 +8,8 @@ version 2 format defined for the `netplan`_ tool. Cloud-init supports
both reading and writing of Version 2; the latter support requires a
distro with `netplan`_ present.
+.. _Netplan Passthrough:
+
Netplan Passthrough
-------------------
@@ -28,7 +30,8 @@ The ``network`` key has at least two required elements. First
it must include ``version: 2`` and one or more of possible device
``types``.
-Cloud-init will read this format from system config.
+Cloud-init will read this format from :ref:`base_config_reference`.
+
For example the following could be present in
``/etc/cloud/cloud.cfg.d/custom-networking.cfg``::
@@ -177,6 +180,48 @@ Enable DHCP for IPv4. Off by default.
Enable DHCP for IPv6. Off by default.
+**dhcp4-overrides** and **dhcp6-overrides**: *<(mapping)>*
+
+DHCP behavior overrides. Overrides will only have an effect if
+the corresponding DHCP type is enabled. Refer to `netplan#dhcp-overrides`_
+for more documentation.
+
+.. note::
+
+ These properties are only consumed on ``netplan`` and ``networkd``
+ renderers.
+
+The ``netplan`` renderer :ref:`passes through <Netplan Passthrough>`
+everything and the ``networkd`` renderer consumes the following sub-properties:
+
+* ``hostname`` *
+* ``route-metric`` *
+* ``send-hostname`` *
+* ``use-dns``
+* ``use-domains``
+* ``use-hostname``
+* ``use-mtu`` *
+* ``use-ntp``
+* ``use-routes`` *
+
+.. note::
+
+ Sub-properties marked with a ``*`` are unsupported for ``dhcp6-overrides``
+ when used with the ``networkd`` renderer.
+
+Example: ::
+
+ dhcp4-overrides:
+ hostname: hal
+ route-metric: 1100
+ send-hostname: false
+ use-dns: false
+ use-domains: false
+ use-hostname: false
+ use-mtu: false
+ use-ntp: false
+ use-routes: false
+
**addresses**: *<(sequence of scalars)>*
Add static addresses to the interface in addition to the ones received
@@ -527,4 +572,5 @@ This is a complex example which shows most available features: ::
dhcp4: yes
.. _netplan: https://netplan.io
+.. _netplan#dhcp-overrides: https://netplan.io/reference#dhcp-overrides
.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 3e48555f..b6d7a9be 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -1,3 +1,5 @@
+.. _network_config:
+
*********************
Network Configuration
*********************
diff --git a/doc/rtd/topics/vendordata.rst b/doc/rtd/topics/vendordata.rst
index e659c941..6ef6b74b 100644
--- a/doc/rtd/topics/vendordata.rst
+++ b/doc/rtd/topics/vendordata.rst
@@ -1,3 +1,5 @@
+.. _vendordata:
+
***********
Vendor Data
***********
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 95503012..1056f0e2 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@7f5bf6e67cf79f31770c456196b2ce695c6ce165
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@d76228e24d400937ba99cdb516460dd757dd3348
pytest
diff --git a/packages/README.md b/packages/README.md
new file mode 100644
index 00000000..61681a2b
--- /dev/null
+++ b/packages/README.md
@@ -0,0 +1,10 @@
+# packages
+
+Package builders under this folder are development-only templates. Do not rely on them.
+
+Downstream packaging resources:
+
+* [debian](https://packages.debian.org/sid/cloud-init)
+* [fedora](https://src.fedoraproject.org/rpms/cloud-init)
+* [opensuse](https://build.opensuse.org/package/show/Cloud:Tools/cloud-init)
+* [ubuntu](https://launchpad.net/cloud-init)
diff --git a/pyproject.toml b/pyproject.toml
index 2ee26121..d566b4a2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,17 +17,18 @@ exclude=[]
module = [
"apport.*",
"BaseHTTPServer",
- "configobj",
"cloudinit.feature_overrides",
+ "configobj",
"debconf",
- "httpretty",
"httplib",
+ "httpretty",
"jsonpatch",
"netifaces",
"paramiko.*",
"pycloudlib.*",
"responses",
"serial",
- "tests.integration_tests.user_settings"
+ "tests.integration_tests.user_settings",
+ "uaclient.*"
]
ignore_missing_imports = true
diff --git a/templates/chrony.conf.cos.tmpl b/templates/chrony.conf.cos.tmpl
new file mode 100644
index 00000000..fa115f86
--- /dev/null
+++ b/templates/chrony.conf.cos.tmpl
@@ -0,0 +1,26 @@
+## template:jinja
+# Welcome to the chrony configuration file. See chrony.conf(5) for more
+# information about usable directives.
+
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# This directive specifies the file into which chronyd will store the rate
+# information.
+driftfile /var/lib/chrony/chrony.drift
+
+# This directive enables kernel synchronisation (every 11 minutes) of the
+# real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
+rtcsync
+
+# Step the system clock instead of slewing it if the adjustment is larger than
+# one second, but only in the first three clock updates.
+makestep 1 3
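The jinja loops in the template above expand the ``pools`` and ``servers``
lists into chrony directives. A quick way to see the expansion is the sketch
below (illustrative only: the template text and values are made up, and it
assumes ``cloudinit.templater.render_string``, which selects the renderer
from the ``## template:jinja`` header line).

# Illustrative rendering of the pools loop from a chrony-style template.
from cloudinit import templater

TEMPLATE = """\
## template:jinja
{% if pools %}# pools
{% endif %}
{% for pool in pools -%}
pool {{pool}} iburst
{% endfor %}
"""

rendered = templater.render_string(TEMPLATE, {"pools": ["0.pool.ntp.org"]})
print(rendered)  # roughly: "# pools\npool 0.pool.ntp.org iburst\n"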
diff --git a/templates/chrony.conf.freebsd.tmpl b/templates/chrony.conf.freebsd.tmpl
new file mode 100644
index 00000000..1e4155f3
--- /dev/null
+++ b/templates/chrony.conf.freebsd.tmpl
@@ -0,0 +1,347 @@
+## template:jinja
+#######################################################################
+#
+# This is an example chrony configuration file. You should copy it to
+# /usr/local/etc/chrony.conf after uncommenting and editing the options that you
+# want to enable. The more obscure options are not included. Refer
+# to the documentation for these.
+#
+#######################################################################
+### COMMENTS
+# Any of the following lines are comments (you have a choice of
+# comment start character):
+# a comment
+% a comment
+! a comment
+; a comment
+#
+# Below, the '!' form is used for lines that you might want to
+# uncomment and edit to make your own chrony.conf file.
+#
+#######################################################################
+#######################################################################
+### SPECIFY YOUR NTP SERVERS
+# Most computers using chrony will send measurement requests to one or
+# more 'NTP servers'. You will probably find that your Internet Service
+# Provider or company have one or more NTP servers that you can specify.
+# Failing that, there are a lot of public NTP servers. There is a list
+# you can access at http://support.ntp.org/bin/view/Servers/WebHome or
+# you can use servers from the pool.ntp.org project.
+
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# This is a reasonable default setting to have on in typical cases for
+# a workstation with a full-time internet connection:
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+
+#######################################################################
+### AVOIDING POTENTIALLY BOGUS CHANGES TO YOUR CLOCK
+#
+# To avoid changes being made to your computer's gain/loss compensation
+# when the measurement history is too erratic, you might want to enable
+# one of the following lines. The first seems good with servers on the
+# Internet, the second seems OK for a LAN environment.
+
+! maxupdateskew 100
+! maxupdateskew 5
+
+# If you want to increase the minimum number of selectable sources
+# required to update the system clock in order to make the
+# synchronisation more reliable, uncomment (and edit) the following
+# line.
+
+! minsources 2
+
+# If your computer has a good stable clock (e.g. it is not a virtual
+# machine), you might also want to reduce the maximum assumed drift
+# (frequency error) of the clock (the value is specified in ppm).
+
+! maxdrift 100
+
+# By default, chronyd allows synchronisation to an unauthenticated NTP
+# source (i.e. specified without the nts and key options) if it agrees with
+# a majority of authenticated NTP sources, or if no authenticated source is
+# specified. If you don't want chronyd to ever synchronise to an
+# unauthenticated NTP source, uncomment the first from the following lines.
+# If you don't want to synchronise to an unauthenticated NTP source only
+# when an authenticated source is specified, uncomment the second line.
+# If you want chronyd to ignore authentication in the source selection,
+# uncomment the third line.
+
+! authselectmode require
+! authselectmode prefer
+! authselectmode ignore
+
+#######################################################################
+### FILENAMES ETC
+# Chrony likes to keep information about your computer's clock in files.
+# The 'driftfile' stores the computer's clock gain/loss rate in parts
+# per million. When chronyd starts, the system clock can be tuned
+# immediately so that it doesn't gain or lose any more time. You
+# generally want this, so it is uncommented.
+
+driftfile /var/db/chrony/drift
+
+# If you want to enable NTP authentication with symmetric keys, you will need
+# to uncomment the following line and edit the file to set up the keys.
+
+! keyfile /usr/local/etc/chrony.keys
+
+# If you specify an NTP server with the nts option to enable authentication
+# with the Network Time Security (NTS) mechanism, or enable server NTS with
+# the ntsservercert and ntsserverkey directives below, the following line will
+# allow the client/server to save the NTS keys and cookies in order to reduce
+# the number of key establishments (NTS-KE sessions).
+
+ntsdumpdir /var/db/chrony
+
+# If chronyd is configured to act as an NTP server and you want to enable NTS
+# for its clients, you will need a TLS certificate and private key. Uncomment
+# and edit the following lines to specify the locations of the certificate and
+# key.
+
+! ntsservercert /etc/.../foo.example.net.crt
+! ntsserverkey /etc/.../foo.example.net.key
+
+# chronyd can save the measurement history for the servers to files when
+# it exits. This is useful in 2 situations:
+#
+# 1. If you stop chronyd and restart it with the '-r' option (e.g. after
+# an upgrade), the old measurements will still be relevant when chronyd
+# is restarted. This will reduce the time needed to get accurate
+# gain/loss measurements.
+#
+# 2. On Linux, if you use the RTC support and start chronyd with
+# '-r -s' on bootup, measurements from the last boot will still be
+# useful (the real time clock is used to 'flywheel' chronyd between
+# boots).
+#
+# Uncomment the following line to use this.
+
+! dumpdir /var/db/chrony
+
+# chronyd writes its process ID to a file. If you try to start a second
+# copy of chronyd, it will detect that the process named in the file is
+# still running and bail out. If you want to change the path to the PID
+# file, uncomment this line and edit it. The default path is shown.
+
+! pidfile /var/run/chrony/chronyd.pid
+
+# If the system timezone database is kept up to date and includes the
+# right/UTC timezone, chronyd can use it to determine the current
+# TAI-UTC offset and when will the next leap second occur.
+
+! leapsectz right/UTC
+
+#######################################################################
+### INITIAL CLOCK CORRECTION
+# This option is useful to quickly correct the clock on start if it's
+# off by a large amount. The value '1.0' means that if the error is less
+# than 1 second, it will be gradually removed by speeding up or slowing
+# down your computer's clock until it is correct. If the error is above
+# 1 second, an immediate time jump will be applied to correct it. The
+# value '3' means the step is allowed only in the first three updates of
+# the clock. Some software can get upset if the system clock jumps
+# (especially backwards), so be careful!
+
+! makestep 1.0 3
+
+#######################################################################
+### LEAP SECONDS
+# A leap second is an occasional one-second correction of the UTC
+# time scale. By default, chronyd tells the kernel to insert/delete
+# the leap second, which makes a backward/forward step to correct the
+# clock for it. As with the makestep directive, this jump can upset
+# some applications. If you prefer chronyd to make a gradual
+# correction, causing the clock to be off for a longer time, uncomment
+# the following line.
+
+! leapsecmode slew
+
+#######################################################################
+### LOGGING
+# If you want to log information about the time measurements chronyd has
+# gathered, you might want to enable the following lines. You probably
+# only need this if you really enjoy looking at the logs, you want to
+# produce some graphs of your system's timekeeping performance, or you
+# need help in debugging a problem.
+#
+# If you enable logging, you may want to add an entry to a log rotation
+# utility's configuration (e.g., newsyslog(8)). 'chronyc cyclelogs'
+# should be used to signal chronyd that a log file has been renamed.
+
+! logdir /var/log/chrony
+! log measurements statistics tracking
+
+# If you have real time clock support enabled (see below), you might want
+# this line instead:
+
+! log measurements statistics tracking rtc
+
+#######################################################################
+### ACTING AS AN NTP SERVER
+# You might want the computer to be an NTP server for other computers.
+#
+# By default, chronyd does not allow any clients to access it. You need
+# to explicitly enable access using 'allow' and 'deny' directives.
+#
+# e.g. to enable client access from the 192.168.*.* class B subnet,
+
+! allow 192.168/16
+
+# .. but disallow the 192.168.100.* subnet of that,
+
+! deny 192.168.100/24
+
+# You can have as many allow and deny directives as you need. The order
+# is unimportant.
+
+# If you want to present your computer's time for others to synchronise
+# with, even if you don't seem to be synchronised to any NTP servers
+# yourself, enable the following line. The value 10 may be varied
+# between 1 and 15. You should avoid small values because you will look
+# like a real NTP server. The value 10 means that you appear to be 10
+# NTP 'hops' away from an authoritative source (atomic clock, GPS
+# receiver, radio clock etc).
+
+! local stratum 10
+
+# Normally, chronyd will keep track of how many times each client
+# machine accesses it. The information can be accessed by the 'clients'
+# command of chronyc. You can disable this facility by uncommenting the
+# following line. This will save a bit of memory if you have many
+# clients and it will also disable support for the interleaved mode.
+
+! noclientlog
+
+# The clientlog size is limited to 512KB by default. If you have many
+# clients, you might want to increase the limit.
+
+! clientloglimit 4194304
+
+# By default, chronyd tries to respond to all valid NTP requests from
+# allowed addresses. If you want to limit the response rate for NTP
+# clients that are sending requests too frequently, uncomment and edit
+# the following line.
+
+! ratelimit interval 3 burst 8
+
+#######################################################################
+### REPORTING BIG CLOCK CHANGES
+# Perhaps you want to know if chronyd suddenly detects any large error
+# in your computer's clock. This might indicate a fault or a problem
+# with the server(s) you are using, for example.
+#
+# The next option causes a message to be written to syslog when chronyd
+# has to correct an error above 0.5 seconds (you can use any amount you
+# like).
+
+! logchange 0.5
+
+# The next option will send email to the named person when chronyd has
+# to correct an error above 0.5 seconds. (If you need to send mail to
+# several people, you need to set up a mailing list or sendmail alias
+# for them and use the address of that.)
+
+! mailonchange wibble@foo.example.net 0.5
+
+#######################################################################
+### COMMAND ACCESS
+# The program chronyc is used to show the current operation of chronyd
+# and to change parts of its configuration whilst it is running.
+
+# By default chronyd binds to the loopback interface. Uncomment the
+# following lines to allow receiving command packets from remote hosts.
+
+! bindcmdaddress 0.0.0.0
+! bindcmdaddress ::
+
+# Normally, chronyd will only allow connections from chronyc on the same
+# machine as itself. This is for security. If you have a subnet
+# 192.168.*.* and you want to be able to use chronyc from any machine on
+# it, you could uncomment the following line. (Edit this to your own
+# situation.)
+
+! cmdallow 192.168/16
+
+# You can add as many 'cmdallow' and 'cmddeny' lines as you like. The
+# syntax and meaning is the same as for 'allow' and 'deny', except that
+# 'cmdallow' and 'cmddeny' control access to the chronyd's command port.
+
+# Rate limiting can be enabled also for command packets. (Note,
+# commands from localhost are never limited.)
+
+! cmdratelimit interval -4 burst 16
+
+#######################################################################
+### HARDWARE TIMESTAMPING
+# On Linux, if the network interface controller and its driver support
+# hardware timestamping, it can significantly improve the accuracy of
+# synchronisation. It can be enabled on specified interfaces only, or it
+# can be enabled on all interfaces that support it.
+
+! hwtimestamp eth0
+! hwtimestamp *
+
+#######################################################################
+### REAL TIME CLOCK
+# chronyd can characterise the system's real-time clock. This is the
+# clock that keeps running when the power is turned off, so that the
+# machine knows the approximate time when it boots again. The error at
+# a particular epoch and gain/loss rate can be written to a file and
+# used later by chronyd when it is started with the '-s' option.
+#
+# You need to have 'enhanced RTC support' compiled into your Linux
+# kernel. (Note, these options apply only to Linux.)
+
+! rtcfile /var/db/chrony/rtc
+
+# Your RTC can be set to keep Universal Coordinated Time (UTC) or local
+# time. (Local time means UTC +/- the effect of your timezone.) If you
+# use UTC, chronyd will function correctly even if the computer is off
+# at the epoch when you enter or leave summer time (aka daylight saving
+# time). However, if you dual boot your system with Microsoft Windows,
+# that will work better if your RTC maintains local time. You take your
+# pick!
+
+! rtconutc
+
+# By default chronyd assumes that the enhanced RTC device is accessed as
+# /dev/rtc. If it's accessed somewhere else on your system (e.g. you're
+# using devfs), uncomment and edit the following line.
+
+! rtcdevice /dev/misc/rtc
+
+# Alternatively, if not using the -s option, this directive can be used
+# to enable a mode in which the RTC is periodically set to the system
+# time, with no tracking of its drift.
+
+! rtcsync
+
+#######################################################################
+### REAL TIME SCHEDULER
+# This directive tells chronyd to use the real-time FIFO scheduler with the
+# specified priority (which must be between 0 and 100). This should result
+# in reduced latency. You don't need it unless you really have a requirement
+# for extreme clock stability. Works only on Linux. Note that the "-P"
+# command-line switch will override this.
+
+! sched_priority 1
+
+#######################################################################
+### LOCKING CHRONYD INTO RAM
+# This directive tells chronyd to use the mlockall() syscall to lock itself
+# into RAM so that it will never be paged out. This should result in reduced
+# latency. You don't need it unless you really have a requirement
+# for extreme clock stability. Works only on Linux. Note that the "-m"
+# command-line switch will also enable this feature.
+
+! lock_all
diff --git a/templates/host.mariner.tmpl b/templates/host.mariner.tmpl
new file mode 100644
index 00000000..7787c367
--- /dev/null
+++ b/templates/host.mariner.tmpl
@@ -0,0 +1,22 @@
+## template:jinja
+{#
+This file /etc/cloud/templates/host.mariner.tmpl is only utilized
+if enabled in cloud-config. Specifically, in order to enable it
+you need to add the following to config:
+ manage_etc_hosts: True
+-#}
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/host.mariner.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
+
+# The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost6.localdomain6 localhost6
diff --git a/templates/ntp.conf.freebsd.tmpl b/templates/ntp.conf.freebsd.tmpl
new file mode 100644
index 00000000..8d417f6d
--- /dev/null
+++ b/templates/ntp.conf.freebsd.tmpl
@@ -0,0 +1,114 @@
+## template:jinja
+
+#
+# $FreeBSD$
+#
+# Default NTP servers for the FreeBSD operating system.
+#
+# Don't forget to enable ntpd in /etc/rc.conf with:
+# ntpd_enable="YES"
+#
+# The driftfile is by default /var/db/ntpd.drift, check
+# /etc/defaults/rc.conf on how to change the location.
+#
+
+#
+# Set the target and limit for adding servers configured via pool statements
+# or discovered dynamically via mechanisms such as broadcast and manycast.
+# Ntpd automatically adds maxclock-1 servers from configured pools, and may
+# add as many as maxclock*2 if necessary to ensure that at least minclock
+# servers are providing good consistent time.
+#
+tos minclock 3 maxclock 6
+
+#
+# The following pool statement will give you a random set of NTP servers
+# geographically close to you. A single pool statement adds multiple
+# servers from the pool, according to the tos minclock/maxclock targets.
+# See http://www.pool.ntp.org/ for details. Note, pool.ntp.org encourages
+# users with a static IP and good upstream NTP servers to add a server
+# to the pool. See http://www.pool.ntp.org/join.html if you are interested.
+#
+# The option `iburst' is used for faster initial synchronization.
+#
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+
+#
+# To configure a specific server, such as an organization-wide local
+# server, add lines similar to the following. One or more specific
+# servers can be configured in addition to, or instead of, any server
+# pools specified above. When both are configured, ntpd first adds all
+# the specific servers, then adds servers from the pool until the tos
+# minclock/maxclock targets are met.
+#
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+#
+# Security:
+#
+# By default, only allow time queries and block all other requests
+# from unauthenticated clients.
+#
+# The "restrict source" line allows peers to be mobilized when added by
+# ntpd from a pool, but does not enable mobilizing a new peer association
+# by other dynamic means (broadcast, manycast, ntpq commands, etc).
+#
+# See http://support.ntp.org/bin/view/Support/AccessRestrictions
+# for more information.
+#
+restrict default limited kod nomodify notrap noquery nopeer
+restrict source limited kod nomodify notrap noquery
+
+#
+# Alternatively, the following rules would block all unauthorized access.
+#
+#restrict default ignore
+#
+# In this case, all remote NTP time servers also need to be explicitly
+# allowed or they would not be able to exchange time information with
+# this server.
+#
+# Please note that this example doesn't work for the servers in
+# the pool.ntp.org domain since they return multiple A records.
+#
+#restrict 0.pool.ntp.org nomodify nopeer noquery notrap
+#restrict 1.pool.ntp.org nomodify nopeer noquery notrap
+#restrict 2.pool.ntp.org nomodify nopeer noquery notrap
+#
+# The following settings allow unrestricted access from the localhost
+restrict 127.0.0.1
+restrict ::1
+
+#
+# If a server loses sync with all upstream servers, NTP clients
+# no longer follow that server. The local clock can be configured
+# to provide a time source when this happens, but it should usually
+# be configured on just one server on a network. For more details see
+# http://support.ntp.org/bin/view/Support/UndisciplinedLocalClock
+# The use of Orphan Mode may be preferable.
+#
+#server 127.127.1.0
+#fudge 127.127.1.0 stratum 10
+
+# See http://support.ntp.org/bin/view/Support/ConfiguringNTP#Section_6.14.
+# for documentation regarding leapfile. Updates to the file can be obtained
+# from ftp://time.nist.gov/pub/ or ftp://tycho.usno.navy.mil/pub/ntp/.
+# Use either leapfile in /etc/ntp or periodically updated leapfile in /var/db.
+#leapfile "/etc/ntp/leap-seconds"
+leapfile "/var/db/ntpd.leap-seconds.list"
+
+# Specify the number of megabytes of memory that should be allocated and
+# locked. -1 (default) means "do not lock the process into memory".
+# 0 means "lock whatever memory the process wants into memory". Any other
+# number means to lock up to that number of megabytes into memory.
+# 0 may result in a segfault when ASLR with stack gap randomization
+# is enabled.
+#rlimit memlock 32
diff --git a/templates/ntpd.conf.openbsd.tmpl b/templates/ntpd.conf.openbsd.tmpl
new file mode 100644
index 00000000..05610bb6
--- /dev/null
+++ b/templates/ntpd.conf.openbsd.tmpl
@@ -0,0 +1,21 @@
+## template:jinja
+
+# $OpenBSD: ntpd.conf,v 1.16 2019/11/06 19:04:12 deraadt Exp $
+#
+# See ntpd.conf(5) and /etc/examples/ntpd.conf
+
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+servers {{pool}}
+{% endfor %}
+{% if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}}
+{% endfor %}
+sensor *
+
+constraint from "9.9.9.9" # quad9 v4 without DNS
+constraint from "2620:fe::fe" # quad9 v6 without DNS
+constraints from "www.google.com" # intentionally not 8.8.8.8
diff --git a/test-requirements.txt b/test-requirements.txt
index df4ad0ff..739d8ebb 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,5 +1,4 @@
# Needed generally in tests
-httpretty>=0.7.1
pytest
pytest-cov
pytest-mock
diff --git a/tests/data/netinfo/freebsd-ifconfig-output b/tests/data/netinfo/freebsd-ifconfig-output
index f64c2f60..3ca0d2b2 100644
--- a/tests/data/netinfo/freebsd-ifconfig-output
+++ b/tests/data/netinfo/freebsd-ifconfig-output
@@ -1,6 +1,7 @@
vtnet0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=6c07bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO,LINKSTATE,RXCSUM_IPV6,TXCSUM_IPV6>
ether 52:54:00:50:b7:0d
+ media: Ethernet autoselect (10Gbase-T <full-duplex>)
re0.33: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=80003<RXCSUM,TXCSUM,LINKSTATE>
ether 80:00:73:63:5c:48
@@ -36,4 +37,5 @@ lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384
inet6 ::1 prefixlen 128
inet6 fe80::1%lo0 prefixlen 64 scopeid 0x2
inet 127.0.0.1 netmask 0xff000000
+ groups: lo
nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
diff --git a/tests/data/netinfo/openbsd-ifconfig-output b/tests/data/netinfo/openbsd-ifconfig-output
new file mode 100644
index 00000000..8c8bfb62
--- /dev/null
+++ b/tests/data/netinfo/openbsd-ifconfig-output
@@ -0,0 +1,29 @@
+lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 32768
+ index 3 priority 0 llprio 3
+ groups: lo
+ inet6 ::1 prefixlen 128
+ inet6 fe80::1%lo0 prefixlen 64 scopeid 0x3
+ inet 127.0.0.1 netmask 0xff000000
+vio0: flags=a48843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST,AUTOCONF6TEMP,AUTOCONF6,AUTOCONF4> mtu 1500
+ lladdr 52:54:00:35:4f:b8
+ index 1 priority 0 llprio 3
+ groups: egress
+ media: Ethernet autoselect
+ status: active
+ inet6 fe80::5054:ff:fe35:4fb8%vio0 prefixlen 64 scopeid 0x1
+ inet 192.168.122.81 netmask 0xffffff00 broadcast 192.168.122.255
+enc0: flags=0<>
+ index 2 priority 0 llprio 3
+ groups: enc
+ status: active
+pflog0: flags=141<UP,RUNNING,PROMISC> mtu 33136
+ index 4 priority 0 llprio 3
+ groups: pflog
+gif0: flags=8051<UP,POINTOPOINT,RUNNING,MULTICAST> metric 0 mtu 1280
+ options=80000<LINKSTATE>
+ tunnel inet 195.154.241.122 --> 216.66.84.42
+ inet6 2001:470:1f12:243::2 --> 2001:470:1f12:243::1 prefixlen 128
+ inet6 fe80::be30:5bff:fed0:471%gif0 prefixlen 64 scopeid 0x4
+ inet6 2001:470:1f12:243:b007::42 prefixlen 64
+ groups: gif
+ nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL>
diff --git a/tests/data/old_pickles/focal-azure-20.1-10-g71af48df-0ubuntu5.pkl b/tests/data/old_pickles/focal-azure-20.1-10-g71af48df-0ubuntu5.pkl
new file mode 100644
index 00000000..7f6000c7
--- /dev/null
+++ b/tests/data/old_pickles/focal-azure-20.1-10-g71af48df-0ubuntu5.pkl
Binary files differ
diff --git a/tests/integration_tests/assets/echo_server.py b/tests/integration_tests/assets/echo_server.py
new file mode 100644
index 00000000..5700082b
--- /dev/null
+++ b/tests/integration_tests/assets/echo_server.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+"""
+Very simple HTTP server in Python that records incoming POST data to a file.
+Each line written represents one request's POST data as a dictionary.
+"""
+import contextlib
+import pathlib
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+OUTFILE = pathlib.Path("/var/tmp/echo_server_output")
+
+
+class Server(BaseHTTPRequestHandler):
+ def _set_response(self):
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.end_headers()
+
+ def do_GET(self):
+ self._set_response()
+
+ def do_POST(self):
+ content_length = int(self.headers["Content-Length"])
+ post_data = self.rfile.read(content_length).decode("utf-8")
+ with OUTFILE.open("a") as f:
+ f.write(f"{post_data}\n")
+ self._set_response()
+
+ def log_message(self, *args, **kwargs):
+ pass
+
+
+server_address = ("", 55555)
+httpd = HTTPServer(server_address, Server)
+with contextlib.suppress(KeyboardInterrupt):
+ httpd.serve_forever()
+httpd.server_close()
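A quick manual check of the echo server above (illustrative; assumes the
service is running locally on port 55555 with the output path defined in
the file):

# Illustrative client: POST one JSON document and read it back from the
# output file the server appends to.
import json
import urllib.request

req = urllib.request.Request(
    "http://localhost:55555",
    data=json.dumps({"instance": "test"}).encode("utf-8"),
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    assert resp.status == 200

print(open("/var/tmp/echo_server_output").read())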
diff --git a/tests/integration_tests/assets/echo_server.service b/tests/integration_tests/assets/echo_server.service
new file mode 100644
index 00000000..8190b2c5
--- /dev/null
+++ b/tests/integration_tests/assets/echo_server.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=echo_server
+Before=cloud-init-local.service
+DefaultDependencies=no
+
+[Service]
+ExecStart=/usr/bin/env python3 /var/tmp/echo_server.py
+
+[Install]
+WantedBy=multi-user.target
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 6b959ade..c4dd4eec 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -3,6 +3,7 @@ import datetime
import logging
import os.path
import random
+import re
import string
from abc import ABC, abstractmethod
from copy import deepcopy
@@ -161,9 +162,17 @@ class IntegrationCloud(ABC):
"user_data": user_data,
}
launch_kwargs = {**default_launch_kwargs, **launch_kwargs}
+ display_launch_kwargs = deepcopy(launch_kwargs)
+ if display_launch_kwargs.get("user_data") is not None:
+ if "token" in display_launch_kwargs.get("user_data"):
+ display_launch_kwargs["user_data"] = re.sub(
+ r"token: .*", "token: REDACTED", launch_kwargs["user_data"]
+ )
log.info(
"Launching instance with launch_kwargs:\n%s",
- "\n".join("{}={}".format(*item) for item in launch_kwargs.items()),
+ "\n".join(
+ "{}={}".format(*item) for item in display_launch_kwargs.items()
+ ),
)
with emit_dots_on_travis():
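The hunk above redacts token values from user-data before logging launch
kwargs. The same idea in isolation (the regex is the one used above; the
helper name is made up):

# Standalone sketch of the token redaction above; "redact_token" is a
# made-up helper name, the regex matches the one in the hunk.
import re

def redact_token(user_data: str) -> str:
    # "." does not match newlines, so only the remainder of the
    # "token: ..." line is replaced.
    return re.sub(r"token: .*", "token: REDACTED", user_data)

assert redact_token("#cloud-config\ntoken: s3cr3t") == (
    "#cloud-config\ntoken: REDACTED"
)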
diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py
index 899ea935..f3ca7158 100644
--- a/tests/integration_tests/datasources/test_lxd_discovery.py
+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
@@ -5,7 +5,7 @@ import yaml
from tests.integration_tests.clouds import ImageSpecification
from tests.integration_tests.instances import IntegrationInstance
-from tests.integration_tests.util import verify_clean_log
+from tests.integration_tests.util import lxd_has_nocloud, verify_clean_log
def _customize_environment(client: IntegrationInstance):
@@ -86,9 +86,13 @@ def test_lxd_datasource_discovery(client: IntegrationInstance):
assert "lxd" == v1["platform"]
assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"]
ds_cfg = json.loads(client.execute("cloud-init query ds").stdout)
- assert ["_doc", "_metadata_api_version", "config", "meta-data"] == sorted(
- list(ds_cfg.keys())
- )
+ assert [
+ "_doc",
+ "_metadata_api_version",
+ "config",
+ "devices",
+ "meta-data",
+ ] == sorted(list(ds_cfg.keys()))
if (
client.settings.PLATFORM == "lxd_vm"
and ImageSpecification.from_os_image().release == "bionic"
@@ -109,11 +113,8 @@ def test_lxd_datasource_discovery(client: IntegrationInstance):
)
assert "#cloud-config\ninstance-id" in ds_cfg["meta-data"]
- # Jammy no longer provides nocloud-net seed files (LP: #1958460)
- if ImageSpecification.from_os_image().release in [
- "bionic",
- "focal",
- ]:
+ # Some series no longer provide nocloud-net seed files (LP: #1958460)
+ if lxd_has_nocloud(client):
# Assert NoCloud seed files are still present in non-Jammy images
# and that NoCloud seed files provide the same content as LXD socket.
nocloud_metadata = yaml.safe_load(
diff --git a/tests/integration_tests/datasources/test_lxd_hotplug.py b/tests/integration_tests/datasources/test_lxd_hotplug.py
new file mode 100644
index 00000000..8c403e04
--- /dev/null
+++ b/tests/integration_tests/datasources/test_lxd_hotplug.py
@@ -0,0 +1,154 @@
+import json
+
+import pytest
+
+from cloudinit import safeyaml
+from cloudinit.subp import subp
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.decorators import retry
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import lxd_has_nocloud
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: ["hotplug"]
+"""
+
+UPDATED_NETWORK_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ dhcp4: true
+ eth2:
+ dhcp4: true
+"""
+
+
+@retry()
+def ensure_hotplug_exited(client):
+ assert "cloud-init" not in client.execute("ps -A")
+
+
+def get_parent_network(instance_name: str):
+ lxd_network = json.loads(
+ subp("lxc network list --format json".split()).stdout
+ )
+ for net in lxd_network:
+ if net["type"] == "bridge" and net["managed"]:
+ if f"/1.0/instances/{instance_name}" in net.get("used_by", []):
+ return net["name"]
+ return "lxdbr0"
+
+
+def _prefer_lxd_datasource_over_nocloud(client: IntegrationInstance):
+ """For hotplug support we need LXD datasource detected instead of NoCloud
+
+ Bionic and Focal still deliver nocloud-net seed files so override it
+ with /etc/cloud/cloud.cfg.d/99-detect-lxd-first.cfg
+ """
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/99-detect-lxd-first.cfg",
+ "datasource_list: [LXD, NoCloud]\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+# TODO: Once LXD adds MACs to the devices endpoint, support LXD VMs here
+# Currently the names are too unpredictable to be worth testing on VMs.
+@pytest.mark.lxd_container
+@pytest.mark.user_data(USER_DATA)
+class TestLxdHotplug:
+ @pytest.fixture(autouse=True, scope="class")
+ def class_teardown(self, class_client: IntegrationInstance):
+ # We need a teardown here because on IntegrationInstance teardown,
+ # if KEEP_INSTANCE=True, we grab the instance IP for logging, but
+ # we're currently running into
+ # https://github.com/canonical/pycloudlib/issues/220 .
+ # Once that issue is fixed, we can remove this teardown
+ yield
+ name = class_client.instance.name
+ subp(f"lxc config device remove {name} eth1".split())
+ subp(f"lxc config device remove {name} eth2".split())
+ subp("lxc network delete ci-test-br-eth1".split())
+ subp("lxc network delete ci-test-br-eth2".split())
+
+ def test_no_network_change_default(
+ self, class_client: IntegrationInstance
+ ):
+ client = class_client
+ if lxd_has_nocloud(client):
+ _prefer_lxd_datasource_over_nocloud(client)
+ assert "eth1" not in client.execute("ip address")
+ pre_netplan = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+
+ networks = subp("lxc network list".split())
+ if "ci-test-br-eth1" not in networks.stdout:
+ subp(
+ "lxc network create ci-test-br-eth1 --type=bridge "
+ "ipv4.address=10.10.41.1/24 ipv4.nat=true".split()
+ )
+ subp(
+ f"lxc config device add {client.instance.name} eth1 nic name=eth1 "
+ f"nictype=bridged parent=ci-test-br-eth1".split()
+ )
+ ensure_hotplug_exited(client)
+ post_netplan = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ assert pre_netplan == post_netplan
+ ip_info = json.loads(client.execute("ip --json address"))
+ eth1s = [i for i in ip_info if i["ifname"] == "eth1"]
+ assert len(eth1s) == 1
+ assert eth1s[0]["operstate"] == "DOWN"
+
+ def test_network_config_applied(self, class_client: IntegrationInstance):
+ client = class_client
+ if lxd_has_nocloud(client):
+ _prefer_lxd_datasource_over_nocloud(client)
+ assert "eth2" not in client.execute("ip address")
+ pre_netplan = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ assert "eth2" not in pre_netplan
+ if ImageSpecification.from_os_image().release in [
+ "bionic",
+ "focal",
+ ]: # pyright: ignore
+ top_key = "user"
+ else:
+ top_key = "cloud-init"
+ assert subp(
+ [
+ "lxc",
+ "config",
+ "set",
+ client.instance.name,
+ f"{top_key}.network-config={UPDATED_NETWORK_CONFIG}",
+ ]
+ )
+ assert (
+ client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ == pre_netplan
+ )
+ networks = subp("lxc network list".split())
+ if "ci-test-br-eth2" not in networks.stdout:
+ assert subp(
+ "lxc network create ci-test-br-eth2 --type=bridge"
+ " ipv4.address=10.10.42.1/24 ipv4.nat=true".split()
+ )
+ assert subp(
+ f"lxc config device add {client.instance.name} eth2 nic name=eth2 "
+ f"nictype=bridged parent=ci-test-br-eth2".split()
+ )
+ ensure_hotplug_exited(client)
+ post_netplan = safeyaml.load(
+ client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ )
+ expected_netplan = safeyaml.load(UPDATED_NETWORK_CONFIG)
+ expected_netplan = {"network": expected_netplan}
+ assert post_netplan == expected_netplan, client.read_from_file(
+ "/var/log/cloud-init.log"
+ )
+ ip_info = json.loads(client.execute("ip --json address"))
+ eth2s = [i for i in ip_info if i["ifname"] == "eth2"]
+ assert len(eth2s) == 1
+ assert eth2s[0]["operstate"] == "UP"
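``ensure_hotplug_exited`` above polls via the ``@retry()`` decorator until
cloud-init's hotplug handling finishes. The real decorator lives in
``tests.integration_tests.decorators``; a self-contained approximation with
made-up defaults:

# Self-contained approximation of a retry decorator; the real one may
# differ. Defaults (30 tries, 1s apart) are made up for illustration.
import time
from functools import wraps


def retry(tries: int = 30, delay: float = 1.0):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            last_error = None
            for _ in range(tries):
                try:
                    return func(*args, **kwargs)
                except AssertionError as err:
                    last_error = err
                    time.sleep(delay)
            raise last_error
        return wrapper
    return decorator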
diff --git a/tests/integration_tests/datasources/test_tmp_noexec.py b/tests/integration_tests/datasources/test_tmp_noexec.py
new file mode 100644
index 00000000..a060e20f
--- /dev/null
+++ b/tests/integration_tests/datasources/test_tmp_noexec.py
@@ -0,0 +1,32 @@
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+
+def customize_client(client: IntegrationInstance):
+ assert client.execute(
+ "echo '/tmp /var/tmp none rw,noexec,nosuid,nodev,bind 0 0'"
+ " | sudo tee -a /etc/fstab"
+ ).ok
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+@pytest.mark.adhoc
+@pytest.mark.azure
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+def test_dhcp_tmp_noexec(client: IntegrationInstance):
+ customize_client(client)
+ assert (
+ "noexec" in client.execute('grep "/var/tmp" /proc/mounts').stdout
+ ), "Precondition error: /var/tmp is not mounted as noexec"
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert (
+ "dhclient did not produce expected files: dhcp.leases, dhclient.pid"
+ not in log
+ )
+ verify_clean_log(log)
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index bd807cef..cf2bf4cc 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -210,4 +210,4 @@ class IntegrationInstance:
if not self.settings.KEEP_INSTANCE:
self.destroy()
else:
- log.info("Keeping Instance, public ip: %s", self.ip)
+ log.info("Keeping Instance, public ip: %s", self.ip())
diff --git a/tests/integration_tests/modules/test_ansible.py b/tests/integration_tests/modules/test_ansible.py
index 0d979d40..d781dabf 100644
--- a/tests/integration_tests/modules/test_ansible.py
+++ b/tests/integration_tests/modules/test_ansible.py
@@ -34,7 +34,6 @@ write_files:
WorkingDirectory=/root/playbooks/.git
ExecStart=/usr/bin/env python3 -m http.server --bind 0.0.0.0 8000
-
- path: /etc/systemd/system/repo_waiter.service
content: |
[Unit]
@@ -79,6 +78,7 @@ write_files:
- "{{ item }}"
state: latest
loop: "{{ packages }}"
+
runcmd:
- [systemctl, enable, repo_server.service]
- [systemctl, enable, repo_waiter.service]
@@ -86,11 +86,15 @@ runcmd:
INSTALL_METHOD = """
ansible:
- install-method: {method}
- package-name: {package}
+ ansible_config: /etc/ansible/ansible.cfg
+ install_method: {method}
+ package_name: {package}
+ galaxy:
+ actions:
+ - ["ansible-galaxy", "collection", "install", "community.grafana"]
pull:
url: "http://0.0.0.0:8000/"
- playbook-name: ubuntu.yml
+ playbook_name: ubuntu.yml
full: true
"""
@@ -103,6 +107,155 @@ git add {REPO_D}/roles/apt/tasks/main.yml {REPO_D}/ubuntu.yml &&\
git commit -m auto &&\
(cd {REPO_D}/.git; git update-server-info)"
+ANSIBLE_CONTROL = """\
+#cloud-config
+#
+# Demonstrate setting up an ansible controller host on boot.
+# This example installs a playbook repository from a remote private repository
+# and then runs two of the plays.
+
+packages_update: true
+packages_upgrade: true
+packages:
+ - git
+ - python3-pip
+
+# Set up an ansible user
+# ----------------------
+# In this case I give the local ansible user passwordless sudo so that ansible
+# may write to a local root-only file.
+users:
+- name: ansible
+ gecos: Ansible User
+ shell: /bin/bash
+ groups: users,admin,wheel,lxd
+ sudo: ALL=(ALL) NOPASSWD:ALL
+
+# Initialize lxd using cloud-init.
+# --------------------------------
+# In this example, a lxd container is
+# started using ansible on boot, so having lxd initialized is required.
+lxd:
+ init:
+ storage_backend: dir
+
+# Configure and run ansible on boot
+# ---------------------------------
+# Install ansible using pip, ensure that community.general collection is
+# installed [1].
+# Use a deploy key to clone a remote private repository then run two playbooks.
+# The first playbook starts a lxd container and creates a new inventory file.
+# The second playbook connects to and configures the container using ansible.
+# The public version of the playbooks can be inspected here [2]
+#
+# [1] community.general is likely already installed by pip
+# [2] https://github.com/holmanb/ansible-lxd-public
+#
+ansible:
+ install_method: pip
+ package_name: ansible
+ run_user: ansible
+ galaxy:
+ actions:
+ - ["ansible-galaxy", "collection", "install", "community.general"]
+
+ setup_controller:
+ repositories:
+ - path: /home/ansible/my-repo/
+ source: git@github.com:holmanb/ansible-lxd-private.git
+ run_ansible:
+ - playbook_dir: /home/ansible/my-repo
+ playbook_name: start-lxd.yml
+ timeout: 120
+ forks: 1
+ private_key: /home/ansible/.ssh/id_rsa
+ - playbook_dir: /home/ansible/my-repo
+ playbook_name: configure-lxd.yml
+ become_user: ansible
+ timeout: 120
+ forks: 1
+ private_key: /home/ansible/.ssh/id_rsa
+ inventory: new_ansible_hosts
+
+# Write a deploy key to the filesystem for ansible.
+# -------------------------------------------------
+# This deploy key is tied to a private github repository [1]
+# This key exists to demonstrate deploy key usage in ansible
+# a duplicate public copy of the repository exists here[2]
+#
+# [1] https://github.com/holmanb/ansible-lxd-private
+# [2] https://github.com/holmanb/ansible-lxd-public
+#
+write_files:
+ - path: /home/ansible/.ssh/known_hosts
+ owner: ansible:ansible
+ permissions: 0o600
+ defer: true
+ content: |
+ |1|YJEFAk6JjnXpUjUSLFiBQS55W9E=|OLNePOn3eBa1PWhBBmt5kXsbGM4= ssh-ed2551\
+9 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
+
+ - path: /home/ansible/.ssh/id_rsa
+ owner: ansible:ansible
+ permissions: 0o600
+ defer: true
+ encoding: base64
+ content: |
+ LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFB
+ QUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUJsd0FBQUFkemMyZ3RjbgpOaEFBQUFB
+ d0VBQVFBQUFZRUEwUWlRa05WQS9VTEpWZzBzT1Q4TEwyMnRGckg5YVR1SWFNT1FiVFdtWjlNUzJh
+ VTZ0cDZoClJDYklWSkhmOHdsaGV3MXNvWmphWVVQSFBsUHNISm5UVlhJTnFTTlpEOGF0Rldjd1gy
+ ZTNBOElZNEhpN0NMMDE3MVBoMVUKYmJGNGVIT1JaVkY2VVkzLzhmbXQ3NmhVYnpiRVhkUXhQdVdh
+ a0IyemxXNTdFclpOejJhYVdnY2pJUGdHV1RNZWVqbEpOcQpXUW9MNlFzSStpeUlzYXNMc1RTajha
+ aVgrT1VjanJEMUY4QXNKS3ZWQStKbnVZNUxFeno1TGQ2SGxGc05XVWtoZkJmOWVOClpxRnJCc1Vw
+ M2VUY1FtejFGaHFFWDJIQjNQT3VSTzlKemVGcTJaRE8wUlNQN09acjBMYm8vSFVTK3V5VkJNTDNi
+ eEF6dEIKQWM5dFJWZjRqcTJuRjNkcUpwVTFFaXZzR0sxaHJZc0VNQklLK0srVzRwc1F5c3ZTL0ZK
+ V2lXZmpqWVMwei9IbkV4MkpHbApOUXUrYkMxL1dXSGVXTGFvNGpSckRSZnNIVnVscTE2MElsbnNx
+ eGl1MmNHd081V29Fc1NHdThucXB5ZzQzWkhDYjBGd21CCml6UFFEQVNsbmlXanFjS21mblRycHpB
+ eTNlVldhd3dsTnBhUWtpZFRBQUFGZ0dLU2o4ZGlrby9IQUFBQUIzTnphQzF5YzIKRUFBQUdCQU5F
+ SWtKRFZRUDFDeVZZTkxEay9DeTl0clJheC9XazdpR2pEa0cwMXBtZlRFdG1sT3JhZW9VUW15RlNS
+ My9NSgpZWHNOYktHWTJtRkR4ejVUN0J5WjAxVnlEYWtqV1EvR3JSVm5NRjludHdQQ0dPQjR1d2k5
+ TmU5VDRkVkcyeGVIaHprV1ZSCmVsR04vL0g1cmUrb1ZHODJ4RjNVTVQ3bG1wQWRzNVZ1ZXhLMlRj
+ OW1tbG9ISXlENEJsa3pIbm81U1RhbGtLQytrTENQb3MKaUxHckM3RTBvL0dZbC9qbEhJNnc5UmZB
+ TENTcjFRUGlaN21PU3hNOCtTM2VoNVJiRFZsSklYd1gvWGpXYWhhd2JGS2QzawozRUpzOVJZYWhG
+ OWh3ZHp6cmtUdlNjM2hhdG1RenRFVWorem1hOUMyNlB4MUV2cnNsUVRDOTI4UU03UVFIUGJVVlgr
+ STZ0CnB4ZDNhaWFWTlJJcjdCaXRZYTJMQkRBU0N2aXZsdUtiRU1yTDB2eFNWb2xuNDQyRXRNL3g1
+ eE1kaVJwVFVMdm13dGYxbGgKM2xpMnFPSTBhdzBYN0IxYnBhdGV0Q0paN0tzWXJ0bkJzRHVWcUJM
+ RWhydko2cWNvT04yUndtOUJjSmdZc3owQXdFcFo0bApvNm5DcG41MDY2Y3dNdDNsVm1zTUpUYVdr
+ SkluVXdBQUFBTUJBQUVBQUFHQUV1ejc3SHU5RUVaeXVqTE9kVG5BVzlhZlJ2ClhET1pBNnBTN3lX
+ RXVmanc1Q1NsTUx3aXNSODN5d3cwOXQxUVd5dmhScUV5WW12T0JlY3NYZ2FTVXRuWWZmdFd6NDRh
+ cHkKL2dRWXZNVkVMR0thSkFDL3E3dmpNcEd5cnhVUGt5TE1oY2tBTFUyS1lnVisvcmovajZwQk1l
+ VmxjaG1rM3Bpa1lyZmZVWApKRFk5OTBXVk8xOTREbTBidUxSekp2Zk1LWUYyQmNmRjRUdmFyak9Y
+ V0F4U3VSOHd3dzA1MG9KOEhkS2FoVzdDbTVTMHBvCkZSbk5YRkdNbkxBNjJ2TjAwdkpXOFY3ajd2
+ dWk5dWtCYmhqUldhSnVZNXJkRy9VWW16QWU0d3ZkSUVucGs5eEluNkpHQ3AKRlJZVFJuN2xUaDUr
+ L1FsUTZGWFJQOElyMXZYWkZuaEt6bDBLOFZxaDJzZjRNNzlNc0lVR0FxR3hnOXhkaGpJYTVkbWdw
+ OApOMThJRURvTkVWS1ViS3VLZS9aNXlmOFo5dG1leGZIMVl0dGptWE1Pb2pCdlVISWpSUzVoZEk5
+ TnhuUEdSTFkya2pBemNtCmdWOVJ2M3Z0ZEYvK3phbGszZkFWTGVLOGhYSytkaS83WFR2WXBmSjJF
+ WkJXaU5yVGVhZ2ZOTkdpWXlkc1F5M3pqWkFBQUEKd0JOUmFrN1VycW5JSE1abjdwa0NUZ2NlYjFN
+ ZkJ5YUZ0bE56ZCtPYmFoNTRIWUlRajVXZFpUQkFJVFJlTVpOdDlTNU5BUgpNOHNRQjhVb1pQYVZT
+ QzNwcElMSU9mTGhzNktZajZSckdkaVl3eUloTVBKNWtSV0Y4eEdDTFVYNUNqd0gyRU9xN1hoSVd0
+ Ck13RUZ0ZC9nRjJEdTdIVU5GUHNaR256SjNlN3BES0RuRTd3MmtoWjhDSXBURmdENzY5dUJZR0F0
+ azQ1UVlURG81SnJvVk0KWlBEcTA4R2IvUmhJZ0pMbUlwTXd5cmVWcExMTGU4U3dvTUpKK3JpaG1u
+ Slp4TzhnQUFBTUVBMGxoaUtlemVUc2hodDR4dQpyV2MwTnh4RDg0YTI5Z1NHZlRwaERQT3JsS1NF
+ WWJrU1hoanFDc0FaSGQ4UzhrTXIzaUY2cG9PazNJV1N2Rko2bWJkM2llCnFkUlRnWEg5VGh3azRL
+ Z3BqVWhOc1F1WVJIQmJJNTlNbytCeFNJMUIxcXptSlNHZG1DQkw1NHd3elptRktEUVBRS1B4aUwK
+ bjBNbGM3R29vaURNalQxdGJ1Vy9PMUVMNUVxVFJxd2dXUFRLaEJBNnI0UG5HRjE1MGhaUklNb29a
+ a0Qyelg2YjFzR29qawpRcHZLa0V5a1R3bktDekY1VFhPOCt3SjNxYmNFbzlBQUFBd1FEK1owcjY4
+ YzJZTU5wc215ajNaS3RaTlBTdkpOY0xteUQvCmxXb05KcTNkakpONHMySmJLOGw1QVJVZFczeFNG
+ RURJOXl4L3dwZnNYb2FxV255Z1AzUG9GdzJDTTRpMEVpSml5dnJMRlUKcjNKTGZEVUZSeTNFSjI0
+ UnNxYmlnbUVzZ1FPelRsM3hmemVGUGZ4Rm9PaG9rU3ZURzg4UFFqaTFBWUh6NWtBN3A2WmZhegpP
+ azExckpZSWU3K2U5QjBsaGt1MEFGd0d5cWxXUW1TL01oSXBuakhJazV0UDRoZUhHU216S1FXSkRi
+ VHNrTldkNmFxMUc3CjZIV2ZEcFg0SGdvTThBQUFBTGFHOXNiV0Z1WWtCaGNtTT0KLS0tLS1FTkQg
+ T1BFTlNTSCBQUklWQVRFIEtFWS0tLS0tCg==
+
+
+# Work around this bug [1] by dropping the second interface after it is no
+# longer required
+# [1] https://github.com/canonical/pycloudlib/issues/220
+runcmd:
+ - [ip, link, delete, lxdbr0]
+"""
+
def _test_ansible_pull_from_local_server(my_client):
setup = my_client.execute(SETUP_REPO)
@@ -116,18 +269,46 @@ def _test_ansible_pull_from_local_server(my_client):
assert "ok=3" in output_log
assert "SUCCESS: config-ansible ran successfully" in log
+ # The binary location depends on the install type; check the filepath
+ # to ensure that the installed collection directory exists
+ output = my_client.execute(
+ "ls /root/.ansible/collections/ansible_collections/community/grafana"
+ )
+ assert not output.stderr.strip() and output.ok
+
+# temporarily disable this test on jenkins until firewall rules are in place
+@pytest.mark.adhoc
@pytest.mark.user_data(
USER_DATA + INSTALL_METHOD.format(package="ansible-core", method="pip")
)
-class TestAnsiblePullPip:
- def test_ansible_pull_pip(self, class_client):
- _test_ansible_pull_from_local_server(class_client)
+def test_ansible_pull_pip(client):
+ _test_ansible_pull_from_local_server(client)
+# temporarily disable this test on jenkins until firewall rules are in place
+@pytest.mark.adhoc
+# Ansible packaged in bionic is 2.5.1. This test relies on ansible collections,
+# which requires Ansible 2.9+, so no bionic. The functionality is covered
+# in `test_ansible_pull_pip` using pip rather than the bionic package.
+@pytest.mark.not_bionic
@pytest.mark.user_data(
USER_DATA + INSTALL_METHOD.format(package="ansible", method="distro")
)
-class TestAnsiblePullDistro:
- def test_ansible_pull_distro(self, class_client):
- _test_ansible_pull_from_local_server(class_client)
+def test_ansible_pull_distro(client):
+ _test_ansible_pull_from_local_server(client)
+
+
+@pytest.mark.user_data(ANSIBLE_CONTROL)
+@pytest.mark.lxd_vm
+def test_ansible_controller(client):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ content_ansible = client.execute(
+ "lxc exec lxd-container-00 -- cat /home/ansible/ansible.txt"
+ )
+ content_root = client.execute(
+ "lxc exec lxd-container-00 -- cat /root/root.txt"
+ )
+ assert content_ansible == "hello as ansible"
+ assert content_root == "hello as root"
diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py
index 2e07b33b..27ce2c5a 100644
--- a/tests/integration_tests/modules/test_apt.py
+++ b/tests/integration_tests/modules/test_apt.py
@@ -78,6 +78,24 @@ apt:
pb0uBy+g0oxJQg15
=uy53
-----END PGP PUBLIC KEY BLOCK-----
+ test_write:
+ keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11
+ keyserver: keyserver.ubuntu.com
+ source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/stable/ubuntu $RELEASE main"
+ append: false
+ test_write.list:
+ keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11
+ keyserver: keyserver.ubuntu.com
+ source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/devel/ubuntu $RELEASE main"
+ append: false
+ test_append:
+ keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11
+ keyserver: keyserver.ubuntu.com
+ source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/stable/ubuntu $RELEASE main"
+ test_append.list:
+ keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11
+ keyserver: keyserver.ubuntu.com
+ source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/devel/ubuntu $RELEASE main"
apt_pipelining: os
""" # noqa: E501
@@ -231,6 +249,32 @@ class TestApt:
).ok
assert conf_exists is False
+ def test_sources_write(self, class_client: IntegrationInstance):
+ """Test overwrite or append to sources file"""
+ release = ImageSpecification.from_os_image().release
+ test_write_content = class_client.read_from_file(
+ "/etc/apt/sources.list.d/test_write.list"
+ )
+ expected_contents = (
+ "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_write.gpg] "
+ f"http://ppa.launchpad.net/juju/devel/ubuntu {release} main"
+ )
+ assert expected_contents.strip() == test_write_content.strip()
+
+ def test_sources_append(self, class_client: IntegrationInstance):
+ release = ImageSpecification.from_os_image().release
+ test_append_content = class_client.read_from_file(
+ "/etc/apt/sources.list.d/test_append.list"
+ )
+
+ expected_contents = (
+ "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_append.gpg] "
+ f"http://ppa.launchpad.net/juju/stable/ubuntu {release} main\n"
+ "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_append.gpg] "
+ f"http://ppa.launchpad.net/juju/devel/ubuntu {release} main"
+ )
+ assert expected_contents.strip() == test_append_content.strip()
+
_DEFAULT_DATA = """\
#cloud-config
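
A compressed sketch of the overwrite-versus-append semantics the new test_sources_write and test_sources_append cases pin down: entries whose filenames collide target the same .list file, and `append: false` truncates before writing. The file handling below is an illustration, not cloud-init's exact code path:

    def write_source(path: str, line: str, append: bool = True) -> None:
        # append=True mirrors the default; append=False overwrites the file
        with open(path, "a" if append else "w") as f:
            f.write(line + "\n")

    # test_write and test_write.list both target test_write.list with
    # append: false, so only the later juju/devel line survives.
    # test_append and test_append.list both use the append default, so the
    # juju/stable and juju/devel lines both survive, in order.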
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 93523bfc..32537729 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -20,6 +20,7 @@ from tests.integration_tests.decorators import retry
from tests.integration_tests.instances import IntegrationInstance
from tests.integration_tests.util import (
get_inactive_modules,
+ lxd_has_nocloud,
verify_clean_log,
verify_ordered_items_in_text,
)
@@ -217,10 +218,7 @@ class TestCombined:
parsed_datasource = json.loads(status_file)["v1"]["datasource"]
if client.settings.PLATFORM in ["lxd_container", "lxd_vm"]:
- if ImageSpecification.from_os_image().release in [
- "bionic",
- "focal",
- ]:
+ if lxd_has_nocloud(client):
datasource = "DataSourceNoCloud"
else:
datasource = "DataSourceLXD"
@@ -294,10 +292,7 @@ class TestCombined:
data = json.loads(instance_json_file)
self._check_common_metadata(data)
v1_data = data["v1"]
- if ImageSpecification.from_os_image().release not in [
- "bionic",
- "focal",
- ]:
+ if not lxd_has_nocloud(client):
cloud_name = "lxd"
subplatform = "LXD socket API v. 1.0 (/dev/lxd/sock)"
# instance-id should be a UUID
@@ -333,10 +328,7 @@ class TestCombined:
data = json.loads(instance_json_file)
self._check_common_metadata(data)
v1_data = data["v1"]
- if ImageSpecification.from_os_image().release not in [
- "bionic",
- "focal",
- ]:
+ if not lxd_has_nocloud(client):
cloud_name = "lxd"
subplatform = "LXD socket API v. 1.0 (/dev/lxd/sock)"
# instance-id should be a UUID
@@ -347,6 +339,8 @@ class TestCombined:
f"LXD instance-id is not a UUID: {v1_data['instance_id']}"
) from e
assert v1_data["subplatform"] == subplatform
+ assert v1_data["platform"] == "lxd"
+ assert v1_data["cloud_id"] == "lxd"
else:
cloud_name = "unknown"
# Pre-Jammy instance-id and instance.name are synonymous
@@ -358,11 +352,11 @@ class TestCombined:
"/dev/sr0" in v1_data["subplatform"],
]
)
+ assert v1_data["platform"] in ["lxd", "nocloud"]
+ assert v1_data["cloud_id"] in ["lxd", "nocloud"]
assert v1_data["cloud_name"] == cloud_name
- assert v1_data["platform"] == "lxd"
- assert v1_data["cloud_id"] == "lxd"
assert f"{v1_data['cloud_id']}" == client.read_from_file(
- "/run/cloud-init/cloud-id-lxd"
+ "/run/cloud-init/cloud-id"
)
assert v1_data["availability_zone"] is None
@@ -428,5 +422,4 @@ class TestCombinedNoCI:
client = class_client
ssh_output = client.read_from_file("/home/ubuntu/.ssh/authorized_keys")
- assert "# ssh-import-id gh:powersj" in ssh_output
assert "# ssh-import-id lp:smoser" in ssh_output
diff --git a/tests/integration_tests/modules/test_jinja_templating.py b/tests/integration_tests/modules/test_jinja_templating.py
index 7788c6f0..56a691f0 100644
--- a/tests/integration_tests/modules/test_jinja_templating.py
+++ b/tests/integration_tests/modules/test_jinja_templating.py
@@ -2,7 +2,15 @@
import pytest
from tests.integration_tests.instances import IntegrationInstance
-from tests.integration_tests.util import verify_ordered_items_in_text
+from tests.integration_tests.util import (
+ verify_clean_log,
+ verify_ordered_items_in_text,
+)
+
+MERGED_CFG_DOC = (
+ "Merged cloud-init system config from /etc/cloud/cloud.cfg "
+ "and /etc/cloud/cloud.cfg.d/"
+)
USER_DATA = """\
## template: jinja
@@ -23,11 +31,91 @@ def test_runcmd_with_variable_substitution(client: IntegrationInstance):
LP: #1931392.
"""
hostname = client.execute("hostname").stdout.strip()
- expected = [
- hostname,
- "Merged cloud-init system config from /etc/cloud/cloud.cfg and "
- "/etc/cloud/cloud.cfg.d/",
- hostname,
- ]
+ expected = [hostname, MERGED_CFG_DOC, hostname]
output = client.read_from_file("/var/tmp/runcmd_output")
verify_ordered_items_in_text(expected, output)
+
+
+@pytest.mark.ci
+def test_substitution_in_etc_cloud(client: IntegrationInstance):
+ orig_etc_cloud = client.read_from_file("/etc/cloud/cloud.cfg")
+ assert "## template: jinja" not in orig_etc_cloud
+
+ new_etc_cloud = (
+ "## template: jinja\n\n"
+ f"{orig_etc_cloud}\n\n"
+ "runcmd:\n"
+ " - echo {{v1.local_hostname}} > /var/tmp/runcmd_output\n"
+ )
+ client.write_to_file("/etc/cloud/cloud.cfg", new_etc_cloud)
+
+ new_cloud_part = (
+ "## template: jinja\n"
+ "bootcmd:\n"
+ " - echo {{merged_cfg._doc}} > /var/tmp/bootcmd_output\n"
+ )
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/50-jinja-test.cfg", new_cloud_part
+ )
+
+ cloud_part_no_jinja = "final_message: final hi {{v1.local_hostname}}"
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/70-no-jinja-test.cfg", cloud_part_no_jinja
+ )
+
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ # Ensure /etc/cloud/cloud.cfg template works as expected
+ hostname = client.execute("hostname").stdout.strip()
+ assert client.read_from_file("/var/tmp/runcmd_output").strip() == hostname
+
+ # Ensure /etc/cloud/cloud.cfg.d template works as expected
+ assert (
+ client.read_from_file("/var/tmp/bootcmd_output").strip()
+ == MERGED_CFG_DOC
+ )
+
+ # Ensure a file without '## template: jinja' isn't interpreted as jinja
+ assert "final hi {{v1.local_hostname}}" in log
+
+
+def test_invalid_etc_cloud_substitution(client: IntegrationInstance):
+ no_var_part = (
+ "## template: jinja\n"
+ "runcmd:\n"
+ " - echo {{bad}} > /var/tmp/runcmd_bad\n"
+ " - echo {{v1.local_hostname}} > /var/tmp/runcmd_output\n"
+ "final_message: final hi {{v1.local_hostname}}"
+ )
+ client.write_to_file("/etc/cloud/cloud.cfg.d/50-no-var.cfg", no_var_part)
+
+ normal_part = "bootcmd:\n" " - echo hi > /var/tmp/bootcmd_output\n"
+ client.write_to_file("/etc/cloud/cloud.cfg.d/60-normal.cfg", normal_part)
+
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+
+ # Ensure we get a warning from the invalid jinja var
+ assert (
+ "jinja_template.py[WARNING]: Could not render jinja template "
+ "variables in file '/etc/cloud/cloud.cfg.d/50-no-var.cfg': "
+ "'bad'"
+ ) in log
+
+ # Ensure the file was still processed despite the invalid var
+ assert (
+ client.read_from_file("/var/tmp/runcmd_bad").strip()
+ == "CI_MISSING_JINJA_VAR/bad"
+ )
+ hostname = client.execute("hostname").stdout.strip()
+ assert client.read_from_file("/var/tmp/runcmd_output").strip() == hostname
+ assert f"final hi {hostname}" in log
+
+ # Ensure other files continue to load correctly
+ assert client.read_from_file("/var/tmp/bootcmd_output").strip() == "hi"
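
The CI_MISSING_JINJA_VAR/bad assertion above reflects how cloud-init renders undefined jinja variables as a visible sentinel rather than failing the whole file. A standalone approximation using a custom jinja2 Undefined; cloud-init's actual handling lives in cloudinit/handlers/jinja_template.py and may differ in detail:

    from jinja2 import Environment, Undefined

    class MissingVar(Undefined):
        def __str__(self):
            # Render undefined variables as a sentinel instead of erroring
            return f"CI_MISSING_JINJA_VAR/{self._undefined_name}"

    env = Environment(undefined=MissingVar)
    out = env.from_string("echo {{bad}}").render()
    assert out == "echo CI_MISSING_JINJA_VAR/bad"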
diff --git a/tests/integration_tests/modules/test_lxd.py b/tests/integration_tests/modules/test_lxd.py
index 3443b74a..55d82a54 100644
--- a/tests/integration_tests/modules/test_lxd.py
+++ b/tests/integration_tests/modules/test_lxd.py
@@ -8,6 +8,8 @@ import warnings
import pytest
import yaml
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
+from tests.integration_tests.instances import IntegrationInstance
from tests.integration_tests.util import verify_clean_log
BRIDGE_USER_DATA = """\
@@ -34,6 +36,119 @@ lxd:
storage_backend: {}
"""
+PRESEED_USER_DATA = """\
+#cloud-config
+lxd:
+ preseed: |
+ config: {}
+ networks:
+ - config:
+ ipv4.address: auto
+ ipv6.address: auto
+ description: ""
+ managed: false
+ name: lxdbr0
+ type: ""
+ storage_pools:
+ - config:
+ source: /var/snap/lxd/common/lxd/storage-pools/default
+ description: ""
+ name: default
+ driver: dir
+ profiles:
+ - config: {}
+ description: ""
+ devices:
+ eth0:
+ name: eth0
+ nictype: bridged
+ parent: lxdbr0
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
+ name: default
+ cluster: null
+"""
+
+
+STORAGE_PRESEED_USER_DATA = """\
+#cloud-config
+lxd:
+ preseed: |
+ networks:
+ - config:
+ ipv4.address: 10.42.42.1/24
+ ipv4.nat: "true"
+ ipv6.address: fd42:4242:4242:4242::1/64
+ ipv6.nat: "true"
+ description: ""
+ name: lxdbr0
+ type: bridge
+ project: default
+ storage_pools:
+ - config:
+ size: 5GiB
+ source: /var/snap/lxd/common/lxd/disks/default.img
+ description: ""
+ name: default
+ driver: {driver}
+ profiles:
+ - config: {{ }}
+ description: Default LXD profile
+ devices:
+ eth0:
+ {nictype}
+ {parent}
+ {network}
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
+ name: default
+ - config:
+ user.vendor-data: |
+ #cloud-config
+ write_files:
+ - path: /var/lib/cloud/scripts/per-once/setup-lxc.sh
+ encoding: b64
+ permissions: '0755'
+ owner: root:root
+ content: |
+ IyEvYmluL2Jhc2gKZWNobyBZRVAgPj4gL3Zhci9sb2cvY2xvdWQtaW5pdC5sb2cK
+ devices:
+ config:
+ source: cloud-init:config
+ type: disk
+ eth0:
+ name: eth0
+ network: lxdbr0
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
+ description: Pycloudlib LXD profile for bionic VMs
+ name: bionic-vm-lxc-setup
+ projects:
+ - config:
+ features.images: "true"
+ features.networks: "true"
+ features.profiles: "true"
+ features.storage.volumes: "true"
+ description: Default LXD project
+ name: default
+ - config:
+ features.images: "false"
+ features.networks: "true"
+ features.profiles: "false"
+ features.storage.volumes: "true"
+ description: Limited project
+ name: limited
+"""
+
@pytest.mark.no_container
@pytest.mark.user_data(BRIDGE_USER_DATA)
@@ -62,6 +177,52 @@ def validate_storage(validate_client, pkg_name, command):
return log
+def validate_preseed_profiles(client, preseed_cfg):
+ for src_profile in preseed_cfg["profiles"]:
+ profile = yaml.safe_load(
+ client.execute(f"lxc profile show {src_profile['name']}")
+ )
+ assert src_profile["config"] == profile["config"]
+
+
+def validate_preseed_storage_pools(client, preseed_cfg):
+ for src_storage in preseed_cfg["storage_pools"]:
+ storage_pool = yaml.safe_load(
+ client.execute(f"lxc storage show {src_storage['name']}")
+ )
+ if "volatile.initial_source" in storage_pool["config"]:
+ assert storage_pool["config"]["source"] == storage_pool[
+ "config"
+ ].pop("volatile.initial_source")
+ if storage_pool["driver"] == "zfs":
+ "default" == storage_pool["config"].pop("zfs.pool_name")
+ assert storage_pool["config"] == src_storage["config"]
+ assert storage_pool["driver"] == src_storage["driver"]
+
+
+def validate_preseed_projects(client: IntegrationInstance, preseed_cfg):
+ # Support for projects by lxd init --preseed was added in lxd 4.12
+ # https://discuss.linuxcontainers.org/t/lxd-4-12-has-been-released/10424#projects-now-supported-by-lxd-init-dump-and-preseed-9
+ if ImageSpecification.from_os_image().release in ("bionic", "focal"):
+ return
+ for src_project in preseed_cfg.get("projects", []):
+ proj_name = src_project["name"]
+ proj_result = client.execute(f"lxc project show {proj_name}")
+ assert (
+ proj_result.ok
+ ), f"Error showing `{proj_name}` project info: {proj_result.stderr}"
+ project = yaml.safe_load(proj_result.stdout)
+ project.pop("used_by", None)
+
+ # `features.storage.buckets` was introduced in lxd 5.5. More info:
+ # https://discuss.linuxcontainers.org/t/lxd-5-5-has-been-released/14899
+ if "features.storage.buckets" in project["config"]:
+ assert "true" == project["config"].pop("features.storage.buckets")
+ assert project == src_project
+
+
@pytest.mark.no_container
@pytest.mark.user_data(STORAGE_USER_DATA.format("btrfs"))
def test_storage_btrfs(client):
@@ -69,6 +230,30 @@ def test_storage_btrfs(client):
@pytest.mark.no_container
+@pytest.mark.not_bionic
+def test_storage_preseed_btrfs(setup_image, session_cloud: IntegrationCloud):
+ cfg_image_spec = ImageSpecification.from_os_image()
+ if cfg_image_spec.release in ("bionic",):
+ nictype = "nictype: bridged"
+ parent = "parent: lxdbr0"
+ network = ""
+ else:
+ nictype = ""
+ parent = ""
+ network = "network: lxdbr0"
+ user_data = STORAGE_PRESEED_USER_DATA.format(
+ driver="btrfs", nictype=nictype, parent=parent, network=network
+ )
+ with session_cloud.launch(user_data=user_data) as client:
+ validate_storage(client, "btrfs-progs", "mkfs.btrfs")
+ src_cfg = yaml.safe_load(user_data)
+ preseed_cfg = yaml.safe_load(src_cfg["lxd"]["preseed"])
+ validate_preseed_profiles(client, preseed_cfg)
+ validate_preseed_storage_pools(client, preseed_cfg)
+ validate_preseed_projects(client, preseed_cfg)
+
+
+@pytest.mark.no_container
@pytest.mark.user_data(STORAGE_USER_DATA.format("lvm"))
def test_storage_lvm(client):
log = client.read_from_file("/var/log/cloud-init.log")
@@ -82,7 +267,42 @@ def test_storage_lvm(client):
validate_storage(client, "lvm2", "lvcreate")
+@pytest.mark.user_data(PRESEED_USER_DATA)
+def test_basic_preseed(client):
+ preseed_cfg = yaml.safe_load(PRESEED_USER_DATA)["lxd"]["preseed"]
+ preseed_cfg = yaml.safe_load(preseed_cfg)
+ cloud_init_log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(cloud_init_log)
+ validate_preseed_profiles(client, preseed_cfg)
+ validate_preseed_storage_pools(client, preseed_cfg)
+ validate_preseed_projects(client, preseed_cfg)
+
+
@pytest.mark.no_container
@pytest.mark.user_data(STORAGE_USER_DATA.format("zfs"))
def test_storage_zfs(client):
validate_storage(client, "zfsutils-linux", "zpool")
+
+
+@pytest.mark.no_container
+@pytest.mark.not_bionic
+def test_storage_preseed_zfs(setup_image, session_cloud: IntegrationCloud):
+ cfg_image_spec = ImageSpecification.from_os_image()
+ if cfg_image_spec.release in ("bionic",):
+ nictype = "nictype: bridged"
+ parent = "parent: lxdbr0"
+ network = ""
+ else:
+ nictype = ""
+ parent = ""
+ network = "network: lxdbr0"
+ user_data = STORAGE_PRESEED_USER_DATA.format(
+ driver="zfs", nictype=nictype, parent=parent, network=network
+ )
+ with session_cloud.launch(user_data=user_data) as client:
+ validate_storage(client, "zfsutils-linux", "zpool")
+ src_cfg = yaml.safe_load(user_data)
+ preseed_cfg = yaml.safe_load(src_cfg["lxd"]["preseed"])
+ validate_preseed_profiles(client, preseed_cfg)
+ validate_preseed_storage_pools(client, preseed_cfg)
+ validate_preseed_projects(client, preseed_cfg)
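
One note on the shape shared by test_basic_preseed and both storage preseed tests: the preseed is YAML embedded as a string inside the cloud-config YAML, so validation parses twice. A self-contained miniature:

    import yaml

    user_data = (
        "#cloud-config\n"
        "lxd:\n"
        "  preseed: |\n"
        "    storage_pools:\n"
        "    - name: default\n"
        "      driver: dir\n"
    )
    outer = yaml.safe_load(user_data)  # parse the outer cloud-config
    preseed = yaml.safe_load(outer["lxd"]["preseed"])  # parse the preseed doc
    assert preseed["storage_pools"][0]["driver"] == "dir"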
diff --git a/tests/integration_tests/modules/test_ubuntu_advantage.py b/tests/integration_tests/modules/test_ubuntu_advantage.py
new file mode 100644
index 00000000..547ec9e7
--- /dev/null
+++ b/tests/integration_tests/modules/test_ubuntu_advantage.py
@@ -0,0 +1,223 @@
+import json
+import logging
+import os
+
+import pytest
+from pycloudlib.cloud import ImageType
+
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
+from tests.integration_tests.conftest import get_validated_source
+from tests.integration_tests.instances import (
+ CloudInitSource,
+ IntegrationInstance,
+)
+from tests.integration_tests.util import verify_clean_log
+
+LOG = logging.getLogger("integration_testing.test_ubuntu_advantage")
+
+CLOUD_INIT_UA_TOKEN = os.environ.get("CLOUD_INIT_UA_TOKEN")
+
+ATTACH_FALLBACK = """\
+#cloud-config
+ubuntu_advantage:
+ features:
+ disable_auto_attach: true
+ token: {token}
+"""
+
+ATTACH = """\
+#cloud-config
+ubuntu_advantage:
+ token: {token}
+ enable:
+ - esm-infra
+"""
+
+PRO_AUTO_ATTACH_DISABLED = """\
+#cloud-config
+ubuntu_advantage:
+ features:
+ disable_auto_attach: true
+"""
+
+PRO_DAEMON_DISABLED = """\
+#cloud-config
+# Disable UA daemon (only needed in GCE)
+ubuntu_advantage:
+ features:
+ disable_auto_attach: true
+bootcmd:
+- sudo systemctl mask ubuntu-advantage.service
+"""
+
+AUTO_ATTACH_CUSTOM_SERVICES = """\
+#cloud-config
+ubuntu_advantage:
+ enable:
+ - livepatch
+"""
+
+
+def did_ua_service_noop(client: IntegrationInstance) -> bool:
+ ua_log = client.read_from_file("/var/log/ubuntu-advantage.log")
+ return (
+ "Skipping auto-attach and deferring to cloud-init to setup and"
+ " configure auto-attach" in ua_log
+ )
+
+
+def is_attached(client: IntegrationInstance) -> bool:
+ status_resp = client.execute("sudo pro status --format json")
+ assert status_resp.ok
+ status = json.loads(status_resp.stdout)
+ return bool(status.get("attached"))
+
+
+def get_services_status(client: IntegrationInstance) -> dict:
+ """Creates a map of service -> is_enable.
+
+ pro status --format json contains a key with list of service objects like:
+
+ {
+ ...
+ "services":[
+ {
+ "available":"yes",
+ "blocked_by":[
+
+ ],
+ "description":"Common Criteria EAL2 Provisioning Packages",
+ "description_override":null,
+ "entitled":"yes",
+ "name":"cc-eal",
+ "status":"disabled",
+ "status_details":"CC EAL2 is not configured"
+ },
+ ...
+ ]
+ }
+
+ :return: Dict where the keys are ua service names and the values
+ are booleans representing whether the service is enabled.
+ """
+ status_resp = client.execute("sudo pro status --format json")
+ assert status_resp.ok
+ status = json.loads(status_resp.stdout)
+ return {
+ svc["name"]: svc["status"] == "enabled" for svc in status["services"]
+ }
+
+
+@pytest.mark.adhoc
+@pytest.mark.ubuntu
+@pytest.mark.skipif(
+ not CLOUD_INIT_UA_TOKEN, reason="CLOUD_INIT_UA_TOKEN env var not provided"
+)
+class TestUbuntuAdvantage:
+ @pytest.mark.user_data(ATTACH_FALLBACK.format(token=CLOUD_INIT_UA_TOKEN))
+ def test_valid_token(self, client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert is_attached(client)
+
+ @pytest.mark.user_data(ATTACH.format(token=CLOUD_INIT_UA_TOKEN))
+ def test_idempotency(self, client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert is_attached(client)
+
+ # Clean reboot to change instance-id and trigger cc_ua on next boot
+ assert client.execute("cloud-init clean --logs").ok
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert is_attached(client)
+
+
+def maybe_install_cloud_init(session_cloud: IntegrationCloud):
+ cfg_image_spec = ImageSpecification.from_os_image()
+ source = get_validated_source(session_cloud)
+
+ launch_kwargs = {
+ "image_id": session_cloud.cloud_instance.daily_image(
+ cfg_image_spec.image_id, image_type=ImageType.PRO
+ )
+ }
+
+ if source is CloudInitSource.NONE:
+ LOG.info(
+ "No need to customize cloud-init version. Return without spawning"
+ " an extra instance"
+ )
+ return launch_kwargs
+
+ user_data = (
+ PRO_DAEMON_DISABLED
+ if session_cloud.settings.PLATFORM == "gce"
+ else PRO_AUTO_ATTACH_DISABLED
+ )
+
+ with session_cloud.launch(
+ user_data=user_data,
+ launch_kwargs=launch_kwargs,
+ ) as client:
+ # TODO: Re-enable this check after cloud images contain
+ # cloud-init 23.4.
+ # Explanation: We have to include something under
+ # user-data.ubuntu_advantage to skip the automatic auto-attach
+ # (driven by ua-auto-attach.service and/or ubuntu-advantage.service)
+ # while customizing the instance, but in cloud-init < 23.4,
+ # user-data.ubuntu_advantage requires a token key.
+
+ # log = client.read_from_file("/var/log/cloud-init.log")
+ # verify_clean_log(log)
+
+ assert not is_attached(
+ client
+ ), "Test precondition error. Instance is auto-attached."
+
+ if session_cloud.settings.PLATFORM == "gce":
+ LOG.info(
+ "Restore `ubuntu-advantage.service` original status for next"
+ " boot"
+ )
+ assert client.execute(
+ "sudo systemctl unmask ubuntu-advantage.service"
+ ).ok
+
+ client.install_new_cloud_init(source)
+ client.destroy()
+
+ return {"image_id": session_cloud.snapshot_id}
+
+
+@pytest.mark.azure
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.ubuntu
+class TestUbuntuAdvantagePro:
+ def test_custom_services(self, session_cloud: IntegrationCloud):
+ release = ImageSpecification.from_os_image().release
+ if release not in {"bionic", "focal", "jammy"}:
+ pytest.skip(f"Cannot run on non LTS release: {release}")
+
+ launch_kwargs = maybe_install_cloud_init(session_cloud)
+ with session_cloud.launch(
+ user_data=AUTO_ATTACH_CUSTOM_SERVICES,
+ launch_kwargs=launch_kwargs,
+ ) as client:
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert did_ua_service_noop(client)
+ assert is_attached(client)
+ services_status = get_services_status(client)
+ assert services_status.pop(
+ "livepatch"
+ ), "livepatch expected to be enabled"
+ enabled_services = {
+ svc for svc, status in services_status.items() if status
+ }
+ assert (
+ not enabled_services
+ ), f"Only livepatch must be enabled. Found: {enabled_services}"
diff --git a/tests/integration_tests/reporting/test_webhook_reporting.py b/tests/integration_tests/reporting/test_webhook_reporting.py
new file mode 100644
index 00000000..9eb720c9
--- /dev/null
+++ b/tests/integration_tests/reporting/test_webhook_reporting.py
@@ -0,0 +1,66 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for testing reporting and event handling."""
+
+import json
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import ASSETS_DIR, verify_clean_log
+
+URL = "http://127.0.0.1:55555"
+
+USER_DATA = f"""\
+#cloud-config
+reporting:
+ webserver:
+ type: webhook
+ endpoint: "{URL}"
+ timeout: 1
+ retries: 1
+
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_webhook_reporting(client: IntegrationInstance):
+ """Test when using webhook reporting that we get expected events.
+
+ This test setups a simple echo server that prints out POST data out to
+ a file. Ensure that that file contains all of the expected events.
+ """
+ client.push_file(ASSETS_DIR / "echo_server.py", "/var/tmp/echo_server.py")
+ client.push_file(
+ ASSETS_DIR / "echo_server.service",
+ "/etc/systemd/system/echo_server.service",
+ )
+ client.execute("cloud-init clean --logs")
+ client.execute("systemctl start echo_server.service")
+ # Run through our standard boot stages here. This removes any uncertainty
+ # around messages transmitted during pre-network boot.
+ client.execute(
+ "cloud-init init --local; "
+ "cloud-init init; "
+ "cloud-init modules --mode=config; "
+ "cloud-init modules --mode=final; "
+ "cloud-init status --wait"
+ )
+ verify_clean_log(client.read_from_file("/var/log/cloud-init.log"))
+
+ server_output = client.read_from_file(
+ "/var/tmp/echo_server_output"
+ ).splitlines()
+ events = [json.loads(line) for line in server_output]
+
+ # The only time this should be lower is if we remove modules
+ assert len(events) > 58, events
+
+ # Assert our first and last expected messages exist
+ ds_events = [
+ e for e in events if e["name"] == "init-network/activate-datasource"
+ ]
+ assert len(ds_events) == 2 # 1 for start, 1 for stop
+
+ final_events = [e for e in events if e["name"] == "modules-final"]
+ assert final_events # 1 for stop and ignore LP: #1992711 for now
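
The echo_server.py asset pushed by this test is not part of the diff. A plausible minimal implementation, assuming it only needs to append each webhook POST body to the output file the test reads (the port and output path are taken from the test; everything else is an assumption):

    from http.server import BaseHTTPRequestHandler, HTTPServer

    OUTPUT = "/var/tmp/echo_server_output"

    class EchoHandler(BaseHTTPRequestHandler):
        def do_POST(self):
            length = int(self.headers.get("Content-Length", 0))
            body = self.rfile.read(length).decode()
            with open(OUTPUT, "a") as f:
                f.write(body + "\n")  # one JSON event per line
            self.send_response(200)
            self.end_headers()

    if __name__ == "__main__":
        HTTPServer(("127.0.0.1", 55555), EchoHandler).serve_forever()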
diff --git a/tests/integration_tests/test_paths.py b/tests/integration_tests/test_paths.py
index 20392e35..14513c82 100644
--- a/tests/integration_tests/test_paths.py
+++ b/tests/integration_tests/test_paths.py
@@ -1,8 +1,14 @@
+import os
import re
+from datetime import datetime
from typing import Iterator
import pytest
+from cloudinit.cmd.devel.logs import (
+ INSTALLER_APPORT_FILES,
+ INSTALLER_APPORT_SENSITIVE_FILES,
+)
from tests.integration_tests.instances import IntegrationInstance
from tests.integration_tests.util import verify_clean_log
@@ -43,12 +49,62 @@ class TestHonorCloudDir:
assert f"{NEW_CLOUD_DIR}/instance/user-data.txt" in re.sub(
r"\s+", "", help_result.stdout
), "user-data file not correctly render in collect-logs -h"
+
+ # Touch a couple of subiquity files to assert they get collected
+ installer_files = (
+ INSTALLER_APPORT_FILES[-1],
+ INSTALLER_APPORT_SENSITIVE_FILES[-1],
+ )
+
+ for apport_file in installer_files:
+ custom_client.execute(
+ f"mkdir -p {os.path.dirname(apport_file.path)}"
+ )
+ custom_client.execute(f"touch {apport_file.path}")
+
collect_logs_result = custom_client.execute(
"cloud-init collect-logs --include-userdata"
)
assert (
collect_logs_result.ok
), f"collect-logs error: {collect_logs_result.stderr}"
+ found_logs = custom_client.execute(
+ "tar -tf cloud-init.tar.gz"
+ ).stdout.splitlines()
+ dirname = datetime.utcnow().date().strftime("cloud-init-logs-%Y-%m-%d")
+ expected_logs = [
+ f"{dirname}/",
+ f"{dirname}/cloud-init.log",
+ f"{dirname}/cloud-init-output.log",
+ f"{dirname}/dmesg.txt",
+ f"{dirname}/user-data.txt",
+ f"{dirname}/version",
+ f"{dirname}/dpkg-version",
+ f"{dirname}/journal.txt",
+ f"{dirname}/run/",
+ f"{dirname}/run/cloud-init/",
+ f"{dirname}/run/cloud-init/result.json",
+ f"{dirname}/run/cloud-init/.instance-id",
+ f"{dirname}/run/cloud-init/cloud-init-generator.log",
+ f"{dirname}/run/cloud-init/enabled",
+ f"{dirname}/run/cloud-init/cloud-id",
+ f"{dirname}/run/cloud-init/instance-data.json",
+ f"{dirname}/run/cloud-init/instance-data-sensitive.json",
+ f"{dirname}{installer_files[0].path}",
+ f"{dirname}{installer_files[1].path}",
+ ]
+ for log in expected_logs:
+ assert log in found_logs
+ # Assert that collect-logs on a disabled cloud-init still grabs
+ # /var/lib/cloud/data
+ custom_client.execute("touch /run/cloud-init/disabled")
+ assert custom_client.execute(
+ "cloud-init collect-logs --include-userdata"
+ ).ok
+ found_logs = custom_client.execute(
+ "tar -tf cloud-init.tar.gz"
+ ).stdout.splitlines()
+ dirname = datetime.utcnow().date().strftime("cloud-init-logs-%Y-%m-%d")
+ assert f"{dirname}/new-cloud-dir/data/result.json" in found_logs
# LXD inserts some agent setup code into VMs on Bionic under
# /var/lib/cloud. The inserted script will cause this test to fail
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
index b13d4703..5ef82e88 100644
--- a/tests/integration_tests/test_upgrade.py
+++ b/tests/integration_tests/test_upgrade.py
@@ -95,7 +95,12 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
# have broken across re-constitution of a cached datasource. Some
# platforms invalidate their datasource cache on reboot, so we run
# it here to ensure we get a dirty run.
- assert instance.execute("cloud-init init").ok
+ assert instance.execute(
+ "cloud-init init --local; "
+ "cloud-init init; "
+ "cloud-init modules --mode=config; "
+ "cloud-init modules --mode=final"
+ ).ok
# Reboot
instance.execute("hostname something-else")
@@ -185,4 +190,6 @@ def test_subsequent_boot_of_upgraded_package(session_cloud: IntegrationCloud):
source, take_snapshot=False, clean=False
)
instance.restart()
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
assert instance.execute("cloud-init status --wait --long").ok
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index 7eec3a4a..69214e9f 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -5,12 +5,14 @@ import re
import time
from collections import namedtuple
from contextlib import contextmanager
+from functools import lru_cache
from itertools import chain
from pathlib import Path
from typing import Set
import pytest
+from cloudinit.subp import subp
from tests.integration_tests.instances import IntegrationInstance
log = logging.getLogger("integration_testing")
@@ -47,6 +49,12 @@ def verify_clean_log(log: str, ignore_deprecations: bool = True):
)
log = "\n".join(log_lines)
+ error_logs = re.findall("CRITICAL.*", log) + re.findall("ERROR.*", log)
+ if error_logs:
+ raise AssertionError(
+ "Found unexpected errors: %s" % "\n".join(error_logs)
+ )
+
warning_count = log.count("WARN")
expected_warnings = 0
traceback_count = log.count("Traceback")
@@ -155,3 +163,12 @@ def get_console_log(client: IntegrationInstance):
if console_log.lower().startswith("no console output"):
pytest.fail("no console output")
return console_log
+
+
+@lru_cache()
+def lxd_has_nocloud(client: IntegrationInstance) -> bool:
+ # Bionic or Focal may be detected as NoCloud rather than LXD
+ lxd_image_metadata = subp(
+ ["lxc", "config", "metadata", "show", client.instance.name]
+ )
+ return "/var/lib/cloud/seed/nocloud" in lxd_image_metadata.stdout
diff --git a/tests/unittests/analyze/test_boot.py b/tests/unittests/analyze/test_boot.py
index 261f4c4e..b17a2f16 100644
--- a/tests/unittests/analyze/test_boot.py
+++ b/tests/unittests/analyze/test_boot.py
@@ -1,5 +1,7 @@
import os
+import pytest
+
from cloudinit.analyze.__main__ import analyze_boot, get_parser
from cloudinit.analyze.show import (
CONTAINER_CODE,
@@ -25,29 +27,33 @@ class TestDistroChecker(CiTestCase):
self.assertEqual(err_code, dist_check_timestamp())
-class TestSystemCtlReader(CiTestCase):
- def test_systemctl_invalid_property(self):
- reader = SystemctlReader("dummyProperty")
- with self.assertRaises(RuntimeError):
- reader.parse_epoch_as_float()
-
- def test_systemctl_invalid_parameter(self):
- reader = SystemctlReader("dummyProperty", "dummyParameter")
- with self.assertRaises(RuntimeError):
+class TestSystemCtlReader:
+ @pytest.mark.parametrize(
+ "args",
+ [
+ pytest.param(["dummyProperty"], id="invalid_property"),
+ pytest.param(
+ ["dummyProperty", "dummyParameter"], id="invalid_parameter"
+ ),
+ ],
+ )
+ def test_systemctl_invalid(self, args):
+ reader = SystemctlReader(*args)
+ with pytest.raises(RuntimeError):
reader.parse_epoch_as_float()
@mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
def test_systemctl_works_correctly_threshold(self, m_subp):
reader = SystemctlReader("dummyProperty", "dummyParameter")
- self.assertEqual(1.0, reader.parse_epoch_as_float())
+ assert 1.0 == reader.parse_epoch_as_float()
thresh = 1.0 - reader.parse_epoch_as_float()
- self.assertTrue(thresh < 1e-6)
- self.assertTrue(thresh > (-1 * 1e-6))
+ assert thresh < 1e-6
+ assert thresh > (-1 * 1e-6)
@mock.patch("cloudinit.subp.subp", return_value=("U=0", None))
def test_systemctl_succeed_zero(self, m_subp):
reader = SystemctlReader("dummyProperty", "dummyParameter")
- self.assertEqual(0.0, reader.parse_epoch_as_float())
+ assert 0.0 == reader.parse_epoch_as_float()
@mock.patch("cloudinit.subp.subp", return_value=("U=1", None))
def test_systemctl_succeed_distinct(self, m_subp):
@@ -56,22 +62,28 @@ class TestSystemCtlReader(CiTestCase):
m_subp.return_value = ("U=2", None)
reader2 = SystemctlReader("dummyProperty", "dummyParameter")
val2 = reader2.parse_epoch_as_float()
- self.assertNotEqual(val1, val2)
-
- @mock.patch("cloudinit.subp.subp", return_value=("100", None))
- def test_systemctl_epoch_not_splittable(self, m_subp):
- reader = SystemctlReader("dummyProperty", "dummyParameter")
- with self.assertRaises(IndexError):
- reader.parse_epoch_as_float()
-
- @mock.patch("cloudinit.subp.subp", return_value=("U=foobar", None))
- def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
+ assert val1 != val2
+
+ @pytest.mark.parametrize(
+ "return_value, exception",
+ [
+ pytest.param(("100", None), IndexError, id="epoch_not_splittable"),
+ pytest.param(
+ ("U=foobar", None),
+ ValueError,
+ id="cannot_convert_epoch_to_float",
+ ),
+ ],
+ )
+ @mock.patch("cloudinit.subp.subp")
+ def test_systemctl_epoch_not_error(self, m_subp, return_value, exception):
+ m_subp.return_value = return_value
reader = SystemctlReader("dummyProperty", "dummyParameter")
- with self.assertRaises(ValueError):
+ with pytest.raises(exception):
reader.parse_epoch_as_float()
-class TestAnalyzeBoot(CiTestCase):
+class TestAnalyzeBoot:
def set_up_dummy_file_ci(self, path, log_path):
infh = open(path, "w+")
infh.write(
@@ -124,7 +136,7 @@ class TestAnalyzeBoot(CiTestCase):
)
self.remove_dummy_file(path, log_path)
- self.assertEqual(err_string, data)
+ assert err_string == data
@mock.patch("cloudinit.util.is_container", return_value=True)
@mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
@@ -141,7 +153,7 @@ class TestAnalyzeBoot(CiTestCase):
finish_code = analyze_boot(name_default, args)
self.remove_dummy_file(path, log_path)
- self.assertEqual(FAIL_CODE, finish_code)
+ assert FAIL_CODE == finish_code
@mock.patch("cloudinit.util.is_container", return_value=True)
@mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
@@ -171,4 +183,4 @@ class TestAnalyzeBoot(CiTestCase):
finish_code = analyze_boot(name_default, args)
self.remove_dummy_file(path, log_path)
- self.assertEqual(CONTAINER_CODE, finish_code)
+ assert CONTAINER_CODE == finish_code
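
The unittest-to-pytest conversion above collapses near-duplicate error tests into single parametrized cases with readable ids. The same pattern in a self-contained form:

    import pytest

    def parse_epoch(raw: str) -> float:
        # Mimics the "U=<epoch>" parsing shape SystemctlReader exercises
        return float(raw.split("=")[1])

    @pytest.mark.parametrize(
        "raw, exception",
        [
            pytest.param("100", IndexError, id="epoch_not_splittable"),
            pytest.param("U=foobar", ValueError, id="cannot_convert_to_float"),
        ],
    )
    def test_parse_epoch_errors(raw, exception):
        with pytest.raises(exception):
            parse_epoch(raw)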
diff --git a/tests/unittests/analyze/test_dump.py b/tests/unittests/analyze/test_dump.py
index 1b4ce820..04c896e2 100644
--- a/tests/unittests/analyze/test_dump.py
+++ b/tests/unittests/analyze/test_dump.py
@@ -3,6 +3,8 @@
from datetime import datetime
from textwrap import dedent
+import pytest
+
from cloudinit.analyze.dump import (
dump_events,
parse_ci_logline,
@@ -10,18 +12,16 @@ from cloudinit.analyze.dump import (
)
from cloudinit.subp import which
from cloudinit.util import write_file
-from tests.unittests.helpers import CiTestCase, mock, skipIf
+from tests.unittests.helpers import mock, skipIf
-class TestParseTimestamp(CiTestCase):
+class TestParseTimestamp:
def test_parse_timestamp_handles_cloud_init_default_format(self):
"""Logs with cloud-init detailed formats will be properly parsed."""
trusty_fmt = "%Y-%m-%d %H:%M:%S,%f"
trusty_stamp = "2016-09-12 14:39:20,839"
dt = datetime.strptime(trusty_stamp, trusty_fmt)
- self.assertEqual(
- float(dt.strftime("%s.%f")), parse_timestamp(trusty_stamp)
- )
+ assert float(dt.strftime("%s.%f")) == parse_timestamp(trusty_stamp)
def test_parse_timestamp_handles_syslog_adding_year(self):
"""Syslog timestamps lack a year. Add year and properly parse."""
@@ -31,9 +31,7 @@ class TestParseTimestamp(CiTestCase):
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
- self.assertEqual(
- float(dt.strftime("%s.%f")), parse_timestamp(syslog_stamp)
- )
+ assert float(dt.strftime("%s.%f")) == parse_timestamp(syslog_stamp)
def test_parse_timestamp_handles_journalctl_format_adding_year(self):
"""Journalctl precise timestamps lack a year. Add year and parse."""
@@ -43,11 +41,10 @@ class TestParseTimestamp(CiTestCase):
# convert stamp ourselves by adding the missing year value
year = datetime.now().year
dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
- self.assertEqual(
- float(dt.strftime("%s.%f")), parse_timestamp(journal_stamp)
- )
+ assert float(dt.strftime("%s.%f")) == parse_timestamp(journal_stamp)
@skipIf(not which("date"), "'date' command not available.")
+ @pytest.mark.allow_subp_for("date")
def test_parse_unexpected_timestamp_format_with_date_command(self):
"""Dump sends unexpected timestamp formats to date for processing."""
new_fmt = "%H:%M %m/%d %Y"
@@ -56,14 +53,10 @@ class TestParseTimestamp(CiTestCase):
year = datetime.now().year
dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
- # use date(1)
- with self.allow_subp(["date"]):
- self.assertEqual(
- float(dt.strftime("%s.%f")), parse_timestamp(new_stamp)
- )
+ assert float(dt.strftime("%s.%f")) == parse_timestamp(new_stamp)
-class TestParseCILogLine(CiTestCase):
+class TestParseCILogLine:
def test_parse_logline_returns_none_without_separators(self):
"""When no separators are found, parse_ci_logline returns None."""
expected_parse_ignores = [
@@ -74,7 +67,7 @@ class TestParseCILogLine(CiTestCase):
"CLOUDINIT",
]
for parse_ignores in expected_parse_ignores:
- self.assertIsNone(parse_ci_logline(parse_ignores))
+ assert None is parse_ci_logline(parse_ignores)
def test_parse_logline_returns_event_for_cloud_init_logs(self):
"""parse_ci_logline returns an event parse from cloud-init format."""
@@ -94,7 +87,7 @@ class TestParseCILogLine(CiTestCase):
"origin": "cloudinit",
"timestamp": timestamp,
}
- self.assertEqual(expected, parse_ci_logline(line))
+ assert expected == parse_ci_logline(line)
def test_parse_logline_returns_event_for_journalctl_logs(self):
"""parse_ci_logline returns an event parse from journalctl format."""
@@ -115,7 +108,7 @@ class TestParseCILogLine(CiTestCase):
"origin": "cloudinit",
"timestamp": timestamp,
}
- self.assertEqual(expected, parse_ci_logline(line))
+ assert expected == parse_ci_logline(line)
@mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
def test_parse_logline_returns_event_for_finish_events(
@@ -136,7 +129,7 @@ class TestParseCILogLine(CiTestCase):
"timestamp": 1472594005.972,
}
m_parse_from_date.return_value = "1472594005.972"
- self.assertEqual(expected, parse_ci_logline(line))
+ assert expected == parse_ci_logline(line)
m_parse_from_date.assert_has_calls(
[mock.call("2016-08-30 21:53:25.972325+00:00")]
)
@@ -158,7 +151,7 @@ class TestParseCILogLine(CiTestCase):
"origin": "cloudinit",
"timestamp": timestamp_dt.timestamp(),
}
- self.assertEqual(expected, parse_ci_logline(line))
+ assert expected == parse_ci_logline(line)
SAMPLE_LOGS = dedent(
@@ -172,7 +165,7 @@ Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
)
-class TestDumpEvents(CiTestCase):
+class TestDumpEvents:
maxDiff = None
@mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
@@ -181,11 +174,10 @@ class TestDumpEvents(CiTestCase):
m_parse_from_date.return_value = "1472594005.972"
events, data = dump_events(rawdata=SAMPLE_LOGS)
expected_data = SAMPLE_LOGS.splitlines()
- self.assertEqual(
- [mock.call("2016-08-30 21:53:25.972325+00:00")],
- m_parse_from_date.call_args_list,
- )
- self.assertEqual(expected_data, data)
+ assert [
+ mock.call("2016-08-30 21:53:25.972325+00:00")
+ ] == m_parse_from_date.call_args_list
+ assert expected_data == data
year = datetime.now().year
dt1 = datetime.strptime(
"Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y"
@@ -208,12 +200,12 @@ class TestDumpEvents(CiTestCase):
"timestamp": 1472594005.972,
},
]
- self.assertEqual(expected_events, events)
+ assert expected_events == events
@mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
- def test_dump_events_with_cisource(self, m_parse_from_date):
+ def test_dump_events_with_cisource(self, m_parse_from_date, tmpdir):
"""Cisource file is read and parsed into a tuple of events and data."""
- tmpfile = self.tmp_path("logfile")
+ tmpfile = str(tmpdir.join("logfile"))
write_file(tmpfile, SAMPLE_LOGS)
m_parse_from_date.return_value = 1472594005.972
with open(tmpfile) as file:
@@ -240,8 +232,8 @@ class TestDumpEvents(CiTestCase):
"timestamp": 1472594005.972,
},
]
- self.assertEqual(expected_events, events)
- self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
+ assert expected_events == events
+ assert SAMPLE_LOGS.splitlines() == [d.strip() for d in data]
m_parse_from_date.assert_has_calls(
[mock.call("2016-08-30 21:53:25.972325+00:00")]
)
diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py
index d2ef82b1..b1372925 100644
--- a/tests/unittests/cmd/devel/test_hotplug_hook.py
+++ b/tests/unittests/cmd/devel/test_hotplug_hook.py
@@ -24,6 +24,7 @@ def mocks():
m_distro.network_activator = mock.PropertyMock(return_value=m_activator)
m_datasource = mock.MagicMock(spec=DataSource)
m_datasource.distro = m_distro
+ m_datasource.skip_hotplug_detect = False
m_init.datasource = m_datasource
m_init.fetch.return_value = m_datasource
@@ -80,8 +81,8 @@ class TestUnsupportedActions:
handle_hotplug(
hotplug_init=mocks.m_init,
devpath="/dev/fake",
- udevaction="not_real",
subsystem="net",
+ udevaction="not_real",
)
@@ -122,6 +123,21 @@ class TestHotplug:
mocks.m_activator.bring_up_interface.assert_not_called()
init._write_to_cache.assert_called_once_with()
+ @mock.patch(
+ "cloudinit.cmd.devel.hotplug_hook.NetHandler.detect_hotplugged_device"
+ )
+ @pytest.mark.parametrize("skip", [True, False])
+ def test_skip_detected(self, m_detect, skip, mocks):
+ mocks.m_init.datasource.skip_hotplug_detect = skip
+ expected_call_count = 0 if skip else 1
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
+ assert m_detect.call_count == expected_call_count
+
def test_update_event_disabled(self, mocks, caplog):
init = mocks.m_init
with mock.patch(
diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py
index c916c19f..4e3f30d4 100644
--- a/tests/unittests/cmd/devel/test_logs.py
+++ b/tests/unittests/cmd/devel/test_logs.py
@@ -1,17 +1,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import glob
import os
import re
from datetime import datetime
from io import StringIO
+import pytest
+
from cloudinit.cmd.devel import logs
-from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.cmd.devel.logs import ApportFile
from cloudinit.subp import subp
-from cloudinit.util import load_file, write_file
+from cloudinit.util import ensure_dir, load_file, write_file
from tests.unittests.helpers import mock
M_PATH = "cloudinit.cmd.devel.logs."
+INSTANCE_JSON_SENSITIVE_FILE = "instance-data-sensitive.json"
@mock.patch("cloudinit.cmd.devel.logs.os.getuid")
@@ -81,6 +85,8 @@ class TestCollectLogs:
mocker.patch(M_PATH + "sys.stderr", fake_stderr)
mocker.patch(M_PATH + "CLOUDINIT_LOGS", [log1, log2])
mocker.patch(M_PATH + "CLOUDINIT_RUN_DIR", run_dir)
+ mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", [])
+ mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", [])
logs.collect_logs(output_tarfile, include_userdata=False)
# unpack the tarfile and check file contents
subp(["tar", "zxvf", output_tarfile, "-C", str(tmpdir)])
@@ -168,6 +174,8 @@ class TestCollectLogs:
mocker.patch(M_PATH + "sys.stderr", fake_stderr)
mocker.patch(M_PATH + "CLOUDINIT_LOGS", [log1, log2])
mocker.patch(M_PATH + "CLOUDINIT_RUN_DIR", run_dir)
+ mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", [])
+ mocker.patch(M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", [])
mocker.patch(M_PATH + "_get_user_data_file", return_value=userdata)
logs.collect_logs(output_tarfile, include_userdata=True)
# unpack the tarfile and check file contents
@@ -187,6 +195,95 @@ class TestCollectLogs:
fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile)
+class TestCollectInstallerLogs:
+ @pytest.mark.parametrize(
+ "include_userdata, apport_files, apport_sensitive_files",
+ (
+ pytest.param(True, [], [], id="no_files_include_userdata"),
+ pytest.param(False, [], [], id="no_files_exclude_userdata"),
+ pytest.param(
+ True,
+ (ApportFile("log1", "Label1"), ApportFile("log2", "Label2")),
+ (
+ ApportFile("private1", "LabelPrivate1"),
+ ApportFile("private2", "PrivateLabel2"),
+ ),
+ id="files_and_dirs_include_userdata",
+ ),
+ pytest.param(
+ False,
+ (ApportFile("log1", "Label1"), ApportFile("log2", "Label2")),
+ (
+ ApportFile("private1", "LabelPrivate1"),
+ ApportFile("private2", "PrivateLabel2"),
+ ),
+ id="files_and_dirs_exclude_userdata",
+ ),
+ ),
+ )
+ def test_include_installer_logs_when_present(
+ self,
+ include_userdata,
+ apport_files,
+ apport_sensitive_files,
+ tmpdir,
+ mocker,
+ ):
+ src_dir = tmpdir.join("src")
+ ensure_dir(src_dir.strpath)
+ # collect-logs nests the full directory path of each file in the tarfile
+ destination_dir = tmpdir.join(src_dir)
+
+ # Create tmppath-based userdata_files, installer_logs, installer_dirs
+ expected_files = []
+ # Create only the last file in each list to assert absent files are ignored
+ apport_files = [
+ logs.ApportFile(src_dir.join(apport.path).strpath, apport.label)
+ for apport in apport_files
+ ]
+ if apport_files:
+ write_file(apport_files[-1].path, apport_files[-1].label)
+ expected_files += [
+ destination_dir.join(
+ os.path.basename(apport_files[-1].path)
+ ).strpath
+ ]
+ apport_sensitive_files = [
+ logs.ApportFile(src_dir.join(apport.path).strpath, apport.label)
+ for apport in apport_sensitive_files
+ ]
+ if apport_sensitive_files:
+ write_file(
+ apport_sensitive_files[-1].path,
+ apport_sensitive_files[-1].label,
+ )
+ if include_userdata:
+ expected_files += [
+ destination_dir.join(
+ os.path.basename(apport_sensitive_files[-1].path)
+ ).strpath
+ ]
+ mocker.patch(M_PATH + "INSTALLER_APPORT_FILES", apport_files)
+ mocker.patch(
+ M_PATH + "INSTALLER_APPORT_SENSITIVE_FILES", apport_sensitive_files
+ )
+ logs.collect_installer_logs(
+ log_dir=tmpdir.strpath,
+ include_userdata=include_userdata,
+ verbosity=0,
+ )
+ expect_userdata = bool(include_userdata and apport_sensitive_files)
+ # when subiquity artifacts exist and include_userdata is true, expect logs
+ expect_subiquity_logs = any([apport_files, expect_userdata])
+ if expect_subiquity_logs:
+ assert destination_dir.exists(), "Missing subiquity artifact dir"
+ assert sorted(expected_files) == sorted(
+ glob.glob(f"{destination_dir.strpath}/*")
+ )
+ else:
+ assert not destination_dir.exists(), "Unexpected subiquity dir"
+
+
class TestParser:
def test_parser_help_has_userdata_file(self, mocker, tmpdir):
userdata = str(tmpdir.join("user-data.txt"))
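
The ApportFile entries above expose .path and .label and are rebased under tmpdir before patching. A standalone sketch of that rebasing, assuming the namedtuple-style (path, label) shape the tests rely on; the real ApportFile lives in cloudinit.cmd.devel.logs:

    from collections import namedtuple

    ApportFile = namedtuple("ApportFile", ["path", "label"])  # assumed shape

    entries = [ApportFile("log1", "Label1"), ApportFile("log2", "Label2")]
    rebased = [ApportFile(f"/tmp/src/{e.path}", e.label) for e in entries]
    assert rebased[-1].path == "/tmp/src/log2"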
diff --git a/tests/unittests/cmd/devel/test_render.py b/tests/unittests/cmd/devel/test_render.py
index 0ef4f364..d179f474 100644
--- a/tests/unittests/cmd/devel/test_render.py
+++ b/tests/unittests/cmd/devel/test_render.py
@@ -1,11 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from collections import namedtuple
from io import StringIO
+import pytest
+
from cloudinit.cmd.devel import render
from cloudinit.helpers import Paths
-from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.util import ensure_dir, write_file
from tests.unittests.helpers import mock, skipUnlessJinja
@@ -13,105 +13,88 @@ M_PATH = "cloudinit.cmd.devel.render."
class TestRender:
+ @pytest.fixture(autouse=True)
+ def mocks(self, mocker):
+ mocker.patch("sys.stderr", new_callable=StringIO)
- Args = namedtuple("Args", "user_data instance_data debug")
-
- def test_handle_args_error_on_missing_user_data(self, caplog, tmpdir):
+ def test_error_on_missing_user_data(self, caplog, tmpdir):
"""When user_data file path does not exist, log an error."""
absent_file = tmpdir.join("user-data")
instance_data = tmpdir.join("instance-data")
write_file(instance_data, "{}")
- args = self.Args(
- user_data=absent_file, instance_data=instance_data, debug=False
- )
- with mock.patch("sys.stderr", new_callable=StringIO):
- assert render.handle_args("anyname", args) == 1
- assert "Missing user-data file: %s" % absent_file in caplog.text
+ assert render.render_template(absent_file, instance_data, False) == 1
+ assert f"Missing user-data file: {absent_file}" in caplog.text
- def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
+ def test_error_on_missing_instance_data(self, caplog, tmpdir):
"""When instance_data file path does not exist, log an error."""
user_data = tmpdir.join("user-data")
absent_file = tmpdir.join("instance-data")
- args = self.Args(
- user_data=user_data, instance_data=absent_file, debug=False
- )
- with mock.patch("sys.stderr", new_callable=StringIO):
- assert render.handle_args("anyname", args) == 1
- assert (
- "Missing instance-data.json file: %s" % absent_file in caplog.text
- )
+ assert render.render_template(user_data, absent_file, False) == 1
+ assert f"Missing instance-data.json file: {absent_file}" in caplog.text
- @mock.patch(M_PATH + "read_cfg_paths")
- def test_handle_args_defaults_instance_data(self, m_paths, caplog, tmpdir):
+ @mock.patch(f"{M_PATH}read_cfg_paths")
+ def test_default_instance_data(self, m_paths, caplog, tmpdir):
"""When no instance_data argument, default to configured run_dir."""
user_data = tmpdir.join("user-data")
run_dir = tmpdir.join("run_dir")
ensure_dir(run_dir)
- m_paths.return_value = Paths({"run_dir": run_dir})
- args = self.Args(user_data=user_data, instance_data=None, debug=False)
- with mock.patch("sys.stderr", new_callable=StringIO):
- assert render.handle_args("anyname", args) == 1
- json_file = run_dir.join(INSTANCE_JSON_FILE)
- msg = "Missing instance-data.json file: %s" % json_file
+ paths = Paths({"run_dir": run_dir})
+ m_paths.return_value = paths
+ assert render.render_template(user_data, None, False) == 1
+ json_file = paths.get_runpath("instance_data")
+ msg = f"Missing instance-data.json file: {json_file}"
assert msg in caplog.text
- @mock.patch(M_PATH + "read_cfg_paths")
- def test_handle_args_root_fallback_from_sensitive_instance_data(
+ @mock.patch(f"{M_PATH}read_cfg_paths")
+ def test_root_fallback_from_sensitive_instance_data(
self, m_paths, caplog, tmpdir
):
"""When root user defaults to sensitive.json."""
user_data = tmpdir.join("user-data")
run_dir = tmpdir.join("run_dir")
ensure_dir(run_dir)
- m_paths.return_value = Paths({"run_dir": run_dir})
- args = self.Args(user_data=user_data, instance_data=None, debug=False)
- with mock.patch("sys.stderr", new_callable=StringIO):
- with mock.patch("os.getuid") as m_getuid:
- m_getuid.return_value = 0
- assert render.handle_args("anyname", args) == 1
- json_file = run_dir.join(INSTANCE_JSON_FILE)
- json_sensitive = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ paths = Paths({"run_dir": run_dir})
+ m_paths.return_value = paths
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ assert render.render_template(user_data, None, False) == 1
+ json_file = paths.get_runpath("instance_data")
+ json_sensitive = paths.get_runpath("instance_data_sensitive")
assert (
- "Missing root-readable %s. Using redacted %s"
- % (json_sensitive, json_file)
- in caplog.text
+ f"Missing root-readable {json_sensitive}. "
+ f"Using redacted {json_file}" in caplog.text
)
- assert "Missing instance-data.json file: %s" % json_file in caplog.text
- @mock.patch(M_PATH + "read_cfg_paths")
- def test_handle_args_root_uses_sensitive_instance_data(
- self, m_paths, tmpdir
- ):
+ assert f"Missing instance-data.json file: {json_file}" in caplog.text
+
+ @mock.patch(f"{M_PATH}read_cfg_paths")
+ def test_root_uses_sensitive_instance_data(self, m_paths, tmpdir):
"""When root user, and no instance-data arg, use sensitive.json."""
user_data = tmpdir.join("user-data")
write_file(user_data, "##template: jinja\nrendering: {{ my_var }}")
run_dir = tmpdir.join("run_dir")
+ json_sensitive = Paths({"run_dir": run_dir}).get_runpath(
+ "instance_data_sensitive"
+ )
+
ensure_dir(run_dir)
- json_sensitive = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
write_file(json_sensitive, '{"my-var": "jinja worked"}')
m_paths.return_value = Paths({"run_dir": run_dir})
- args = self.Args(user_data=user_data, instance_data=None, debug=False)
with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
with mock.patch("os.getuid") as m_getuid:
m_getuid.return_value = 0
- assert render.handle_args("anyname", args) == 0
+ assert render.render_template(user_data, None, False) == 0
assert "rendering: jinja worked" in m_stdout.getvalue()
@skipUnlessJinja()
- def test_handle_args_renders_instance_data_vars_in_template(
- self, caplog, tmpdir
- ):
+ def test_renders_instance_data_vars_in_template(self, caplog, tmpdir):
"""If user_data file is a jinja template render instance-data vars."""
user_data = tmpdir.join("user-data")
write_file(user_data, "##template: jinja\nrendering: {{ my_var }}")
instance_data = tmpdir.join("instance-data")
write_file(instance_data, '{"my-var": "jinja worked"}')
- args = self.Args(
- user_data=user_data, instance_data=instance_data, debug=True
- )
- with mock.patch("sys.stderr", new_callable=StringIO):
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- assert render.handle_args("anyname", args) == 0
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ assert render.render_template(user_data, instance_data, True) == 0
# Make sure the log is correctly captured. There is an issue
# with this fixture in pytest==4.6.9 (focal):
assert (
@@ -120,7 +103,7 @@ class TestRender:
assert "rendering: jinja worked" == m_stdout.getvalue()
@skipUnlessJinja()
- def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(
+ def test_render_warns_and_gives_up_on_invalid_jinja_operation(
self, caplog, tmpdir
):
"""If user_data file has invalid jinja operations log warnings."""
@@ -128,16 +111,40 @@ class TestRender:
write_file(user_data, "##template: jinja\nrendering: {{ my-var }}")
instance_data = tmpdir.join("instance-data")
write_file(instance_data, '{"my-var": "jinja worked"}')
- args = self.Args(
- user_data=user_data, instance_data=instance_data, debug=True
- )
- with mock.patch("sys.stderr", new_callable=StringIO):
- assert render.handle_args("anyname", args) == 1
+ assert render.render_template(user_data, instance_data, True) == 1
assert (
"Ignoring jinja template for %s: Undefined jinja"
' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
' "my_var"?' % user_data
) in caplog.text
+ @skipUnlessJinja()
+ def test_jinja_load_error(self, caplog, tmpdir):
+ user_data = tmpdir.join("user-data")
+ write_file(user_data, "##template: jinja\nrendering: {{ my-var }}")
+ instance_data = tmpdir.join("instance-data")
+ write_file(instance_data, '{"my-var": "jinja failed"')
+ render.render_template(user_data, instance_data, False)
+ assert (
+ "Cannot render from instance data due to exception" in caplog.text
+ )
+
+ @skipUnlessJinja()
+ def test_not_jinja_error(self, caplog, tmpdir):
+ user_data = tmpdir.join("user-data")
+ write_file(user_data, "{{ my-var }}")
+ instance_data = tmpdir.join("instance-data")
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ render.render_template(user_data, instance_data, False)
+ assert (
+ "Cannot render from instance data due to exception" in caplog.text
+ )
-# vi: ts=4 expandtab
+ @skipUnlessJinja()
+ def test_no_user_data(self, caplog, tmpdir):
+ user_data = tmpdir.join("user-data")
+ write_file(user_data, "##template: jinja")
+ instance_data = tmpdir.join("instance-data")
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ render.render_template(user_data, instance_data, False)
+ assert "Unable to render user-data file" in caplog.text
diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py
index b859b83b..232cc731 100644
--- a/tests/unittests/cmd/test_clean.py
+++ b/tests/unittests/cmd/test_clean.py
@@ -29,7 +29,7 @@ def clean_paths(tmpdir):
@pytest.fixture(scope="function")
def init_class(clean_paths):
- class FakeInit(object):
+ class FakeInit:
cfg = {
"def_log_file": clean_paths.log,
"output": {"all": f"|tee -a {clean_paths.output_log}"},
diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py
index 37f1df2c..80600555 100644
--- a/tests/unittests/cmd/test_cloud_id.py
+++ b/tests/unittests/cmd/test_cloud_id.py
@@ -5,11 +5,52 @@
import pytest
from cloudinit import util
-from cloudinit.cmd import cloud_id
+from cloudinit.cmd import cloud_id, status
+from cloudinit.helpers import Paths
from tests.unittests.helpers import mock
M_PATH = "cloudinit.cmd.cloud_id."
+STATUS_DETAILS_DONE = status.StatusDetails(
+ status.UXAppStatus.DONE,
+ status.UXAppBootStatusCode.UNKNOWN,
+ "DataSourceNoCloud somedetail",
+ [],
+ "",
+ "nocloud",
+)
+STATUS_DETAILS_DISABLED = status.StatusDetails(
+ status.UXAppStatus.DISABLED,
+ status.UXAppBootStatusCode.DISABLED_BY_GENERATOR,
+ "DataSourceNoCloud somedetail",
+ [],
+ "",
+ "",
+)
+STATUS_DETAILS_NOT_RUN = status.StatusDetails(
+ status.UXAppStatus.NOT_RUN,
+ status.UXAppBootStatusCode.UNKNOWN,
+ "",
+ [],
+ "",
+ "",
+)
+STATUS_DETAILS_RUNNING = status.StatusDetails(
+ status.UXAppStatus.RUNNING,
+ status.UXAppBootStatusCode.UNKNOWN,
+ "",
+ [],
+ "",
+ "",
+)
+
+
+@pytest.fixture(autouse=True)
+def setup_mocks(mocker):
+ mocker.patch(
+ "cloudinit.cmd.cloud_id.read_cfg_paths", return_value=Paths({})
+ )
+
class TestCloudId:
def test_cloud_id_arg_parser_defaults(self):
@@ -43,7 +84,7 @@ class TestCloudId:
self, get_status_details, tmpdir, capsys
):
"""Exit error when the provided instance-data.json does not exist."""
- get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ get_status_details.return_value = STATUS_DETAILS_DONE
instance_data = tmpdir.join("instance-data.json")
cmd = ["cloud-id", "--instance-data", instance_data.strpath]
with mock.patch("sys.argv", cmd):
@@ -58,7 +99,7 @@ class TestCloudId:
self, get_status_details, tmpdir, capsys
):
"""Exit error when the provided instance-data.json is not json."""
- get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ get_status_details.return_value = STATUS_DETAILS_DONE
instance_data = tmpdir.join("instance-data.json")
cmd = ["cloud-id", "--instance-data", instance_data.strpath]
instance_data.write("{")
@@ -78,7 +119,7 @@ class TestCloudId:
):
"""Report canonical cloud-id from cloud_name in instance-data."""
instance_data = tmpdir.join("instance-data.json")
- get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ get_status_details.return_value = STATUS_DETAILS_DONE
instance_data.write(
'{"v1": {"cloud_name": "mycloud", "region": "somereg"}}',
)
@@ -95,7 +136,7 @@ class TestCloudId:
self, get_status_details, tmpdir, capsys
):
"""Report long cloud-id format from cloud_name and region."""
- get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ get_status_details.return_value = STATUS_DETAILS_DONE
instance_data = tmpdir.join("instance-data.json")
instance_data.write(
'{"v1": {"cloud_name": "mycloud", "region": "somereg"}}',
@@ -113,7 +154,7 @@ class TestCloudId:
self, get_status_details, tmpdir, capsys
):
"""Report discovered canonical cloud_id when region lookup matches."""
- get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ get_status_details.return_value = STATUS_DETAILS_DONE
instance_data = tmpdir.join("instance-data.json")
instance_data.write(
'{"v1": {"cloud_name": "aws", "region": "cn-north-1",'
@@ -132,7 +173,7 @@ class TestCloudId:
self, get_status_details, tmpdir, capsys
):
"""Report v1 instance-data content with cloud_id when --json set."""
- get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ get_status_details.return_value = STATUS_DETAILS_DONE
instance_data = tmpdir.join("instance-data.json")
instance_data.write(
'{"v1": {"cloud_name": "unknown", "region": "dfw",'
@@ -151,26 +192,27 @@ class TestCloudId:
with mock.patch("sys.argv", cmd):
with pytest.raises(SystemExit) as context_manager:
cloud_id.main()
- out, _err = capsys.readouterr()
+ out, err = capsys.readouterr()
+ assert "DEPRECATED: Use: cloud-init query v1\n" == err
assert 0 == context_manager.value.code
assert expected + "\n" == out
@pytest.mark.parametrize(
- "status, exit_code",
+ "details, exit_code",
(
- (cloud_id.UXAppStatus.DISABLED, 2),
- (cloud_id.UXAppStatus.NOT_RUN, 3),
- (cloud_id.UXAppStatus.RUNNING, 0),
+ (STATUS_DETAILS_DISABLED, 2),
+ (STATUS_DETAILS_NOT_RUN, 3),
+ (STATUS_DETAILS_RUNNING, 0),
),
)
@mock.patch(M_PATH + "get_status_details")
def test_cloud_id_unique_exit_codes_for_status(
- self, get_status_details, status, exit_code, tmpdir, capsys
+ self, get_status_details, details, exit_code, tmpdir, capsys
):
"""cloud-id returns unique exit codes for status."""
- get_status_details.return_value = status, "n/a", ""
+ get_status_details.return_value = details
instance_data = tmpdir.join("instance-data.json")
- if status == cloud_id.UXAppStatus.RUNNING:
+ if details.status == cloud_id.UXAppStatus.RUNNING:
instance_data.write("{}")
cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--json"]
with mock.patch("sys.argv", cmd):
diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py
index dd517a4b..720a0768 100644
--- a/tests/unittests/cmd/test_query.py
+++ b/tests/unittests/cmd/test_query.py
@@ -6,17 +6,14 @@ import json
import os
from collections import namedtuple
from io import BytesIO
+from pathlib import Path
from textwrap import dedent
import pytest
from cloudinit.cmd import query
from cloudinit.helpers import Paths
-from cloudinit.sources import (
- INSTANCE_JSON_FILE,
- INSTANCE_JSON_SENSITIVE_FILE,
- REDACT_SENSITIVE_VALUE,
-)
+from cloudinit.sources import REDACT_SENSITIVE_VALUE
from cloudinit.util import b64e, write_file
from tests.unittests.helpers import mock
@@ -30,6 +27,11 @@ def _gzip_data(data):
return iobuf.getvalue()
+@pytest.fixture(autouse=True)
+def setup_mocks(mocker):
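+    """Patch read_cfg_paths so every test sees default Paths({})."""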
+ mocker.patch("cloudinit.cmd.query.read_cfg_paths", return_value=Paths({}))
+
+
@mock.patch(M_PATH + "addLogHandlerCLI", lambda *args: "")
class TestQuery:
@@ -219,12 +221,12 @@ class TestQuery:
vendor_data=None,
varname=None,
)
- paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ paths, _, _, _ = self._setup_paths(tmpdir)
with mock.patch(M_PATH + "read_cfg_paths") as m_paths:
m_paths.return_value = paths
assert 1 == query.handle_args("anyname", args)
- json_file = run_dir.join(INSTANCE_JSON_FILE)
- msg = "Missing instance-data file: %s" % json_file.strpath
+ json_file = paths.get_runpath("instance_data")
+ msg = f"Missing instance-data file: {json_file}"
assert msg in caplog.text
def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
@@ -239,17 +241,17 @@ class TestQuery:
vendor_data=None,
varname=None,
)
- paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ paths, _, _, _ = self._setup_paths(tmpdir)
with mock.patch(M_PATH + "read_cfg_paths") as m_paths:
m_paths.return_value = paths
with mock.patch("os.getuid") as m_getuid:
m_getuid.return_value = 0
assert 1 == query.handle_args("anyname", args)
- json_file = run_dir.join(INSTANCE_JSON_FILE)
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- msg = "Missing root-readable %s. Using redacted %s instead." % (
- sensitive_file.strpath,
- json_file.strpath,
+ json_file = paths.get_runpath("instance_data")
+ sensitive_file = paths.get_runpath("instance_data_sensitive")
+ msg = (
+ f"Missing root-readable {sensitive_file}. "
+ f"Using redacted {json_file} instead."
)
assert msg in caplog.text
@@ -266,11 +268,11 @@ class TestQuery:
self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir
):
"""Support reading multiple user-data file content types"""
- paths, run_dir, user_data, vendor_data = self._setup_paths(
+ paths, _, user_data, vendor_data = self._setup_paths(
tmpdir, ud_val=ud_src, vd_val=vd_src
)
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- sensitive_file.write('{"my-var": "it worked"}')
+ sensitive_file = Path(paths.get_runpath("instance_data_sensitive"))
+ sensitive_file.write_text('{"my-var": "it worked"}')
args = self.Args(
debug=False,
dump_all=True,
@@ -300,9 +302,9 @@ class TestQuery:
self, capsys, tmpdir
):
"""When no instance_data argument, root uses sensitive json."""
- paths, run_dir, _, _ = self._setup_paths(tmpdir)
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- sensitive_file.write('{"my-var": "it worked"}')
+ paths, _, _, _ = self._setup_paths(tmpdir)
+ sensitive_file = Path(paths.get_runpath("instance_data_sensitive"))
+ sensitive_file.write_text('{"my-var": "it worked"}')
ud_path = os.path.join(paths.instance_link, "user-data.txt")
write_file(ud_path, "instance_link_ud")
@@ -335,11 +337,11 @@ class TestQuery:
self, capsys, tmpdir
):
"""When no instance_data argument, root uses sensitive json."""
- paths, run_dir, user_data, vendor_data = self._setup_paths(
+ paths, _, user_data, vendor_data = self._setup_paths(
tmpdir, ud_val="ud", vd_val="vd"
)
- sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
- sensitive_file.write('{"my-var": "it worked"}')
+ sensitive_file = Path(paths.get_runpath("instance_data_sensitive"))
+ sensitive_file.write_text('{"my-var": "it worked"}')
args = self.Args(
debug=False,
dump_all=True,
diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py
index e9169a55..6ae3b398 100644
--- a/tests/unittests/cmd/test_status.py
+++ b/tests/unittests/cmd/test_status.py
@@ -1,8 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import json
import os
from collections import namedtuple
-from io import StringIO
from textwrap import dedent
from typing import Callable, Dict, Optional, Union
from unittest import mock
@@ -18,7 +18,7 @@ M_NAME = "cloudinit.cmd.status"
M_PATH = f"{M_NAME}."
MyPaths = namedtuple("MyPaths", "run_dir")
-MyArgs = namedtuple("MyArgs", "long wait")
+MyArgs = namedtuple("MyArgs", "long wait format")
Config = namedtuple(
"Config", "new_root, status_file, disable_file, result_file, paths"
)
@@ -36,13 +36,15 @@ def config(tmpdir):
class TestStatus:
+ maxDiff = None
+
@pytest.mark.parametrize(
[
"ensured_file",
"uses_systemd",
"get_cmdline",
- "expected_is_disabled",
- "is_disabled_msg",
+ "expected_bootstatus",
+ "failure_msg",
"expected_reason",
],
[
@@ -51,7 +53,7 @@ class TestStatus:
lambda config: config.disable_file,
False,
"root=/dev/my-root not-important",
- False,
+ status.UXAppBootStatusCode.ENABLED_BY_SYSVINIT,
"expected enabled cloud-init on sysvinit",
"Cloud-init enabled on sysvinit",
id="false_on_sysvinit",
@@ -61,7 +63,7 @@ class TestStatus:
lambda config: config.disable_file,
True,
"root=/dev/my-root not-important",
- True,
+ status.UXAppBootStatusCode.DISABLED_BY_MARKER_FILE,
"expected disabled cloud-init",
lambda config: f"Cloud-init disabled by {config.disable_file}",
id="true_on_disable_file",
@@ -71,7 +73,7 @@ class TestStatus:
lambda config: config.disable_file,
True,
"something cloud-init=enabled else",
- False,
+ status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE,
"expected enabled cloud-init",
"Cloud-init enabled by kernel command line cloud-init=enabled",
id="false_on_kernel_cmdline_enable",
@@ -81,7 +83,7 @@ class TestStatus:
None,
True,
"something cloud-init=disabled else",
- True,
+ status.UXAppBootStatusCode.DISABLED_BY_KERNEL_CMDLINE,
"expected disabled cloud-init",
"Cloud-init disabled by kernel parameter cloud-init=disabled",
id="true_on_kernel_cmdline",
@@ -91,7 +93,7 @@ class TestStatus:
lambda config: os.path.join(config.paths.run_dir, "disabled"),
True,
"something",
- True,
+ status.UXAppBootStatusCode.DISABLED_BY_GENERATOR,
"expected disabled cloud-init",
"Cloud-init disabled by cloud-init-generator",
id="true_when_generator_disables",
@@ -101,63 +103,65 @@ class TestStatus:
lambda config: os.path.join(config.paths.run_dir, "enabled"),
True,
"something ignored",
- False,
+ status.UXAppBootStatusCode.ENABLED_BY_GENERATOR,
"expected enabled cloud-init",
"Cloud-init enabled by systemd cloud-init-generator",
id="false_when_enabled_in_systemd",
),
],
)
- def test__is_cloudinit_disabled(
+ def test_get_bootstatus(
self,
ensured_file: Optional[Callable],
uses_systemd: bool,
get_cmdline: str,
- expected_is_disabled: bool,
- is_disabled_msg: str,
+        expected_bootstatus: status.UXAppBootStatusCode,
+ failure_msg: str,
expected_reason: Union[str, Callable],
config: Config,
):
if ensured_file is not None:
ensure_file(ensured_file(config))
- (is_disabled, reason) = wrap_and_call(
+ (code, reason) = wrap_and_call(
M_NAME,
{
"uses_systemd": uses_systemd,
"get_cmdline": get_cmdline,
},
- status._is_cloudinit_disabled,
+ status.get_bootstatus,
config.disable_file,
config.paths,
)
- assert is_disabled == expected_is_disabled, is_disabled_msg
+ assert code == expected_bootstatus, failure_msg
if isinstance(expected_reason, str):
assert reason == expected_reason
else:
assert reason == expected_reason(config)
@mock.patch(M_PATH + "read_cfg_paths")
- def test_status_returns_not_run(self, m_read_cfg_paths, config: Config):
+ def test_status_returns_not_run(
+ self, m_read_cfg_paths, config: Config, capsys
+ ):
"""When status.json does not exist yet, return 'not run'."""
m_read_cfg_paths.return_value = config.paths
assert not os.path.exists(
config.status_file
), "Unexpected status.json found"
- cmdargs = MyArgs(long=False, wait=False)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- M_NAME,
- {"_is_cloudinit_disabled": (False, "")},
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
+ cmdargs = MyArgs(long=False, wait=False, format="tabular")
+ retcode = wrap_and_call(
+ M_NAME,
+ {"get_bootstatus": (status.UXAppBootStatusCode.UNKNOWN, "")},
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
assert retcode == 0
- assert m_stdout.getvalue() == "status: not run\n"
+ out, _err = capsys.readouterr()
+ assert out == "status: not run\n"
@mock.patch(M_PATH + "read_cfg_paths")
def test_status_returns_disabled_long_on_presence_of_disable_file(
- self, m_read_cfg_paths, config: Config
+ self, m_read_cfg_paths, config: Config, capsys
):
"""When cloudinit is disabled, return disabled reason."""
m_read_cfg_paths.return_value = config.paths
@@ -168,21 +172,20 @@ class TestStatus:
status_file = os.path.join(config.paths.run_dir, "status.json")
return bool(not filepath == status_file)
- cmdargs = MyArgs(long=True, wait=False)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- M_NAME,
- {
- "os.path.exists": {"side_effect": fakeexists},
- "_is_cloudinit_disabled": (
- True,
- "disabled for some reason",
- ),
- },
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
+ cmdargs = MyArgs(long=True, wait=False, format="tabular")
+ retcode = wrap_and_call(
+ M_NAME,
+ {
+ "os.path.exists": {"side_effect": fakeexists},
+ "get_bootstatus": (
+ status.UXAppBootStatusCode.DISABLED_BY_KERNEL_CMDLINE,
+ "disabled for some reason",
+ ),
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
assert retcode == 0
assert checked_files == [
os.path.join(config.paths.run_dir, "status.json")
@@ -190,15 +193,18 @@ class TestStatus:
expected = dedent(
"""\
status: disabled
+ boot_status_code: disabled-by-kernel-cmdline
detail:
disabled for some reason
"""
)
- assert m_stdout.getvalue() == expected
+ out, _err = capsys.readouterr()
+ assert out == expected
@pytest.mark.parametrize(
[
"ensured_file",
+ "bootstatus",
"status_content",
"assert_file",
"cmdargs",
@@ -209,9 +215,10 @@ class TestStatus:
# Report running when status.json exists but result.json does not.
pytest.param(
None,
+ status.UXAppBootStatusCode.UNKNOWN,
{},
lambda config: config.result_file,
- MyArgs(long=False, wait=False),
+ MyArgs(long=False, wait=False, format="tabular"),
0,
"status: running\n",
id="running_on_no_results_json",
@@ -219,9 +226,10 @@ class TestStatus:
# Report running when status exists with an unfinished stage.
pytest.param(
lambda config: config.result_file,
+ status.UXAppBootStatusCode.ENABLED_BY_GENERATOR,
{"v1": {"init": {"start": 1, "finished": None}}},
None,
- MyArgs(long=False, wait=False),
+ MyArgs(long=False, wait=False, format="tabular"),
0,
"status: running\n",
id="running",
@@ -229,6 +237,7 @@ class TestStatus:
# Report done when results.json exists and no stages are unfinished.
pytest.param(
lambda config: config.result_file,
+ status.UXAppBootStatusCode.ENABLED_BY_GENERATOR,
{
"v1": {
"stage": None, # No current stage running
@@ -247,7 +256,7 @@ class TestStatus:
}
},
None,
- MyArgs(long=False, wait=False),
+ MyArgs(long=False, wait=False, format="tabular"),
0,
"status: done\n",
id="done",
@@ -255,6 +264,7 @@ class TestStatus:
# Long format of done status includes datasource info.
pytest.param(
lambda config: config.result_file,
+ status.UXAppBootStatusCode.ENABLED_BY_GENERATOR,
{
"v1": {
"stage": None,
@@ -268,12 +278,13 @@ class TestStatus:
}
},
None,
- MyArgs(long=True, wait=False),
+ MyArgs(long=True, wait=False, format="tabular"),
0,
dedent(
"""\
status: done
- time: Thu, 01 Jan 1970 00:02:05 +0000
+ boot_status_code: enabled-by-generator
+ last_update: Thu, 01 Jan 1970 00:02:05 +0000
detail:
DataSourceNoCloud [seed=/var/.../seed/nocloud-net]\
[dsmode=net]
@@ -284,6 +295,7 @@ class TestStatus:
# Reports error when any stage has errors.
pytest.param(
None,
+ status.UXAppBootStatusCode.ENABLED_BY_GENERATOR,
{
"v1": {
"stage": None,
@@ -297,7 +309,7 @@ class TestStatus:
}
},
None,
- MyArgs(long=False, wait=False),
+ MyArgs(long=False, wait=False, format="tabular"),
1,
"status: error\n",
id="on_errors",
@@ -305,6 +317,7 @@ class TestStatus:
# Long format of error status includes all error messages.
pytest.param(
None,
+ status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE,
{
"v1": {
"stage": None,
@@ -326,12 +339,13 @@ class TestStatus:
}
},
None,
- MyArgs(long=True, wait=False),
+ MyArgs(long=True, wait=False, format="tabular"),
1,
dedent(
"""\
status: error
- time: Thu, 01 Jan 1970 00:02:05 +0000
+ boot_status_code: enabled-by-kernel-cmdline
+ last_update: Thu, 01 Jan 1970 00:02:05 +0000
detail:
error1
error2
@@ -343,6 +357,7 @@ class TestStatus:
# Long format reports the stage in which we are running.
pytest.param(
None,
+ status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE,
{
"v1": {
"stage": "init",
@@ -351,18 +366,90 @@ class TestStatus:
}
},
None,
- MyArgs(long=True, wait=False),
+ MyArgs(long=True, wait=False, format="tabular"),
0,
dedent(
"""\
status: running
- time: Thu, 01 Jan 1970 00:02:04 +0000
+ boot_status_code: enabled-by-kernel-cmdline
+ last_update: Thu, 01 Jan 1970 00:02:04 +0000
detail:
Running in stage: init
"""
),
id="running_long_format",
),
+ pytest.param(
+ None,
+ status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE,
+ {
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ None,
+ MyArgs(long=False, wait=False, format="yaml"),
+ 0,
+ dedent(
+ """\
+ ---
+ _schema_version: '1'
+ boot_status_code: enabled-by-kernel-cmdline
+ datasource: ''
+ detail: 'Running in stage: init'
+ errors: []
+ last_update: Thu, 01 Jan 1970 00:02:04 +0000
+ schemas:
+ '1':
+ boot_status_code: enabled-by-kernel-cmdline
+ datasource: ''
+ detail: 'Running in stage: init'
+ errors: []
+ last_update: Thu, 01 Jan 1970 00:02:04 +0000
+ status: running
+ status: running
+ ...
+
+ """
+ ),
+ id="running_yaml_format",
+ ),
+ pytest.param(
+ None,
+ status.UXAppBootStatusCode.ENABLED_BY_KERNEL_CMDLINE,
+ {
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ None,
+ MyArgs(long=False, wait=False, format="json"),
+ 0,
+ {
+ "_schema_version": "1",
+ "boot_status_code": "enabled-by-kernel-cmdline",
+ "datasource": "",
+ "detail": "Running in stage: init",
+ "errors": [],
+ "last_update": "Thu, 01 Jan 1970 00:02:04 +0000",
+ "schemas": {
+ "1": {
+ "boot_status_code": "enabled-by-kernel-cmdline",
+ "datasource": "",
+ "detail": "Running in stage: init",
+ "errors": [],
+ "last_update": "Thu, 01 Jan 1970 00:02:04 +0000",
+ "status": "running",
+ }
+ },
+ "status": "running",
+ },
+ id="running_json_format",
+ ),
],
)
@mock.patch(M_PATH + "read_cfg_paths")
@@ -370,12 +457,14 @@ class TestStatus:
self,
m_read_cfg_paths,
ensured_file: Optional[Callable],
+ bootstatus: status.UXAppBootStatusCode,
status_content: Dict,
assert_file,
cmdargs: MyArgs,
expected_retcode: int,
expected_status: str,
config: Config,
+ capsys,
):
m_read_cfg_paths.return_value = config.paths
if ensured_file:
@@ -388,20 +477,23 @@ class TestStatus:
assert not os.path.exists(
config.result_file
), f"Unexpected {config.result_file} found"
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- M_NAME,
- {"_is_cloudinit_disabled": (False, "")},
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
+ retcode = wrap_and_call(
+ M_NAME,
+ {"get_bootstatus": (bootstatus, "")},
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
assert retcode == expected_retcode
- assert m_stdout.getvalue() == expected_status
+ out, _err = capsys.readouterr()
+ if isinstance(expected_status, dict):
+ assert json.loads(out) == expected_status
+ else:
+ assert out == expected_status
@mock.patch(M_PATH + "read_cfg_paths")
def test_status_wait_blocks_until_done(
- self, m_read_cfg_paths, config: Config
+ self, m_read_cfg_paths, config: Config, capsys
):
"""Specifying wait will poll every 1/4 second until done state."""
m_read_cfg_paths.return_value = config.paths
@@ -433,25 +525,25 @@ class TestStatus:
result_file = config.result_file
ensure_file(result_file)
- cmdargs = MyArgs(long=False, wait=True)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- M_NAME,
- {
- "sleep": {"side_effect": fake_sleep},
- "_is_cloudinit_disabled": (False, ""),
- },
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
+ cmdargs = MyArgs(long=False, wait=True, format="tabular")
+ retcode = wrap_and_call(
+ M_NAME,
+ {
+ "sleep": {"side_effect": fake_sleep},
+ "get_bootstatus": (status.UXAppBootStatusCode.UNKNOWN, ""),
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
assert retcode == 0
assert sleep_calls == 4
- assert m_stdout.getvalue() == "....\nstatus: done\n"
+ out, _err = capsys.readouterr()
+ assert out == "....\nstatus: done\n"
@mock.patch(M_PATH + "read_cfg_paths")
def test_status_wait_blocks_until_error(
- self, m_read_cfg_paths, config: Config
+ self, m_read_cfg_paths, config: Config, capsys
):
"""Specifying wait will poll every 1/4 second until error state."""
m_read_cfg_paths.return_value = config.paths
@@ -485,24 +577,24 @@ class TestStatus:
elif sleep_calls == 3:
write_json(config.status_file, error_json)
- cmdargs = MyArgs(long=False, wait=True)
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- retcode = wrap_and_call(
- M_NAME,
- {
- "sleep": {"side_effect": fake_sleep},
- "_is_cloudinit_disabled": (False, ""),
- },
- status.handle_status_args,
- "ignored",
- cmdargs,
- )
+ cmdargs = MyArgs(long=False, wait=True, format="tabular")
+ retcode = wrap_and_call(
+ M_NAME,
+ {
+ "sleep": {"side_effect": fake_sleep},
+ "get_bootstatus": (status.UXAppBootStatusCode.UNKNOWN, ""),
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
assert retcode == 1
assert sleep_calls == 4
- assert m_stdout.getvalue() == "....\nstatus: error\n"
+ out, _err = capsys.readouterr()
+ assert out == "....\nstatus: error\n"
@mock.patch(M_PATH + "read_cfg_paths")
- def test_status_main(self, m_read_cfg_paths, config: Config):
+ def test_status_main(self, m_read_cfg_paths, config: Config, capsys):
"""status.main can be run as a standalone script."""
m_read_cfg_paths.return_value = config.paths
write_json(
@@ -510,17 +602,17 @@ class TestStatus:
{"v1": {"init": {"start": 1, "finished": None}}},
)
with pytest.raises(SystemExit) as e:
- with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
- wrap_and_call(
- M_NAME,
- {
- "sys.argv": {"new": ["status"]},
- "_is_cloudinit_disabled": (False, ""),
- },
- status.main,
- )
+ wrap_and_call(
+ M_NAME,
+ {
+ "sys.argv": {"new": ["status"]},
+ "get_bootstatus": (status.UXAppBootStatusCode.UNKNOWN, ""),
+ },
+ status.main,
+ )
assert e.value.code == 0
- assert m_stdout.getvalue() == "status: running\n"
+ out, _err = capsys.readouterr()
+ assert out == "status: running\n"
# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py
index 4ce412ce..f107f964 100644
--- a/tests/unittests/config/test_apt_source_v1.py
+++ b/tests/unittests/config/test_apt_source_v1.py
@@ -35,7 +35,7 @@ S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-class FakeDistro(object):
+class FakeDistro:
"""Fake Distro helper object"""
def update_package_sources(self):
diff --git a/tests/unittests/config/test_cc_ansible.py b/tests/unittests/config/test_cc_ansible.py
index 6f71add3..a0d6bcab 100644
--- a/tests/unittests/config/test_cc_ansible.py
+++ b/tests/unittests/config/test_cc_ansible.py
@@ -1,8 +1,9 @@
import re
from copy import deepcopy
+from os import environ
from textwrap import dedent
from unittest import mock
-from unittest.mock import call
+from unittest.mock import MagicMock
from pytest import mark, param, raises
@@ -16,6 +17,14 @@ from cloudinit.config.schema import (
from tests.unittests.helpers import skipUnlessJsonSchema
from tests.unittests.util import get_cloud
+try:
+ import pip as _pip # type: ignore # noqa: F401
+
+ HAS_PIP = True
+except ImportError:
+ HAS_PIP = False
+
+M_PATH = "cloudinit.config.cc_ansible."
distro_version = dedent(
"""ansible 2.10.8
config file = None
@@ -41,48 +50,103 @@ pip_version = dedent(
libyaml = True """
)
-CFG_FULL = {
+CFG_CTRL = {
+ "ansible": {
+ "install_method": "distro",
+ "package_name": "ansible-core",
+ "ansible_config": "/etc/ansible/ansible.cfg",
+ "galaxy": {
+ "actions": [["ansible-galaxy", "install", "debops.apt"]],
+ },
+ "setup_controller": {
+ "repositories": [
+ {
+ "path": "/home/ansible/public/",
+ "source": "git@github.com:holmanb/ansible-lxd-public.git",
+ },
+ {
+ "path": "/home/ansible/private/",
+ "source": "git@github.com:holmanb/ansible-lxd-private.git",
+ },
+ {
+ "path": "/home/ansible/vmboot",
+ "source": "git@github.com:holmanb/vmboot.git",
+ },
+ ],
+ "run_ansible": [
+ {
+ "playbook_dir": "/home/ansible/my-repo",
+ "playbook_name": "start-lxd.yml",
+ "timeout": 120,
+ "forks": 1,
+ "private_key": "/home/ansible/.ssh/id_rsa",
+ },
+ {
+ "playbook_name": "configure-lxd.yml",
+ "become_user": "ansible",
+ "timeout": 120,
+ "forks": 1,
+ "private_key": "/home/ansible/.ssh/id_rsa",
+ "become_password_file": "/path/less/traveled",
+ "connection-password-file": "/path/more/traveled",
+ "module_path": "/path/head/traveled",
+ "vault_password_file": "/path/tail/traveled",
+ "playbook_dir": "/path/to/nowhere",
+ "inventory": "/a/file/as/well",
+ },
+ ],
+ },
+ },
+}
+
+CFG_FULL_PULL = {
"ansible": {
- "install-method": "distro",
- "package-name": "ansible-core",
+ "install_method": "distro",
+ "package_name": "ansible-core",
+ "ansible_config": "/etc/ansible/ansible.cfg",
+ "galaxy": {
+ "actions": [["ansible-galaxy", "install", "debops.apt"]],
+ },
"pull": {
"url": "https://github/holmanb/vmboot",
- "playbook-name": "arch.yml",
- "accept-host-key": True,
+ "playbook_name": "arch.yml",
+ "accept_host_key": True,
"clean": True,
"full": True,
"diff": False,
- "ssh-common-args": "-y",
- "scp-extra-args": "-l",
- "sftp-extra-args": "-f",
+ "ssh_common_args": "-y",
+ "scp_extra_args": "-l",
+ "sftp_extra_args": "-f",
"checkout": "tree",
- "module-path": "~/.ansible/plugins/modules:"
+ "module_path": "~/.ansible/plugins/modules:"
"/usr/share/ansible/plugins/modules",
"timeout": "10",
- "vault-id": "me",
+ "vault_id": "me",
"connection": "smart",
- "vault-password-file": "/path/to/file",
- "module-name": "git",
+ "vault_password_file": "/path/to/file",
+ "module_name": "git",
"sleep": "1",
"tags": "cumulus",
- "skip-tags": "cisco",
- "private-key": "{nope}",
+ "skip_tags": "cisco",
+ "private_key": "{nope}",
},
}
}
+
CFG_MINIMAL = {
"ansible": {
- "install-method": "pip",
- "package-name": "ansible",
+ "install_method": "pip",
+ "package_name": "ansible",
+ "run_user": "ansible",
"pull": {
"url": "https://github/holmanb/vmboot",
- "playbook-name": "ubuntu.yml",
+ "playbook_name": "ubuntu.yml",
},
}
}
-class TestSetPasswordsSchema:
+class TestSchema:
@mark.parametrize(
("config", "error_msg"),
(
@@ -94,10 +158,10 @@ class TestSetPasswordsSchema:
param(
{
"ansible": {
- "install-method": "distro",
+ "install_method": "distro",
"pull": {
"url": "https://github/holmanb/vmboot",
- "playbook-name": "centos.yml",
+ "playbook_name": "centos.yml",
"dance": "bossa nova",
},
}
@@ -106,17 +170,22 @@ class TestSetPasswordsSchema:
id="additional-properties",
),
param(
- CFG_FULL,
+ CFG_FULL_PULL,
None,
- id="all-keys",
+ id="all-pull-keys",
+ ),
+ param(
+ CFG_CTRL,
+ None,
+ id="ctrl-keys",
),
param(
{
"ansible": {
- "install-method": "true",
+ "install_method": "true",
"pull": {
"url": "https://github/holmanb/vmboot",
- "playbook-name": "debian.yml",
+ "playbook_name": "debian.yml",
},
}
},
@@ -126,9 +195,9 @@ class TestSetPasswordsSchema:
param(
{
"ansible": {
- "install-method": "pip",
+ "install_method": "pip",
"pull": {
- "playbook-name": "fedora.yml",
+ "playbook_name": "fedora.yml",
},
}
},
@@ -138,13 +207,13 @@ class TestSetPasswordsSchema:
param(
{
"ansible": {
- "install-method": "pip",
+ "install_method": "pip",
"pull": {
"url": "gophers://encrypted-gophers/",
},
}
},
- "'playbook-name' is a required property",
+ "'playbook_name' is a required property",
id="require-url",
),
),
@@ -162,7 +231,7 @@ class TestAnsible:
def test_filter_args(self):
"""only diff should be removed"""
out = cc_ansible.filter_args(
- CFG_FULL.get("ansible", {}).get("pull", {})
+ CFG_FULL_PULL.get("ansible", {}).get("pull", {})
)
assert out == {
"url": "https://github/holmanb/vmboot",
@@ -190,15 +259,15 @@ class TestAnsible:
@mark.parametrize(
("cfg", "exception"),
(
- (CFG_FULL, None),
+ (CFG_FULL_PULL, None),
(CFG_MINIMAL, None),
(
{
"ansible": {
- "package-name": "ansible-core",
- "install-method": "distro",
+ "package_name": "ansible-core",
+ "install_method": "distro",
"pull": {
- "playbook-name": "ubuntu.yml",
+ "playbook_name": "ubuntu.yml",
},
}
},
@@ -207,7 +276,7 @@ class TestAnsible:
(
{
"ansible": {
- "install-method": "pip",
+ "install_method": "pip",
"pull": {
"url": "https://github/holmanb/vmboot",
},
@@ -218,26 +287,25 @@ class TestAnsible:
),
)
def test_required_keys(self, cfg, exception, mocker):
- m_subp = mocker.patch(
- "cloudinit.config.cc_ansible.subp", return_value=("", "")
- )
- mocker.patch("cloudinit.config.cc_ansible.which", return_value=True)
+ mocker.patch(M_PATH + "subp", return_value=("", ""))
+ mocker.patch(M_PATH + "which", return_value=True)
+ mocker.patch(M_PATH + "AnsiblePull.check_deps")
mocker.patch(
- "cloudinit.config.cc_ansible.AnsiblePull.get_version",
+ M_PATH + "AnsiblePull.get_version",
return_value=cc_ansible.Version(2, 7, 1),
)
- mocker.patch("cloudinit.config.cc_ansible.AnsiblePull.check_deps")
mocker.patch(
- "cloudinit.config.cc_ansible.AnsiblePullDistro.is_installed",
+ M_PATH + "AnsiblePullDistro.is_installed",
return_value=False,
)
+ mocker.patch.dict(M_PATH + "os.environ", clear=True)
if exception:
with raises(exception):
cc_ansible.handle("", cfg, get_cloud(), None, None)
else:
cloud = get_cloud(mocked_distro=True)
- print(cfg)
- install = cfg["ansible"]["install-method"]
+ cloud.distro.pip_package_name = "python3-pip"
+ install = cfg["ansible"]["install_method"]
cc_ansible.handle("", cfg, cloud, None, None)
if install == "distro":
cloud.distro.install_packages.assert_called_once()
@@ -245,45 +313,40 @@ class TestAnsible:
"ansible-core"
)
elif install == "pip":
- m_subp.assert_has_calls(
- [
- call(["python3", "-m", "pip", "list"]),
- call(
- [
- "python3",
- "-m",
- "pip",
- "install",
- "--user",
- "ansible",
- ]
- ),
- ]
- )
- assert m_subp.call_args[0][0] == [
- "ansible-pull",
- "--url=https://github/holmanb/vmboot",
- "ubuntu.yml",
- ]
+ if HAS_PIP:
+ assert 0 == cloud.distro.install_packages.call_count
+ else:
+ cloud.distro.install_packages.assert_called_with(
+ "python3-pip"
+ )
- @mock.patch("cloudinit.config.cc_ansible.which", return_value=False)
+ @mock.patch(M_PATH + "which", return_value=False)
def test_deps_not_installed(self, m_which):
+ """assert exception raised if package not installed"""
with raises(ValueError):
cc_ansible.AnsiblePullDistro(get_cloud().distro).check_deps()
- @mock.patch("cloudinit.config.cc_ansible.which", return_value=True)
+ @mock.patch(M_PATH + "which", return_value=True)
def test_deps(self, m_which):
+ """assert exception not raised if package installed"""
cc_ansible.AnsiblePullDistro(get_cloud().distro).check_deps()
- @mock.patch("cloudinit.config.cc_ansible.which", return_value=True)
- @mock.patch(
- "cloudinit.config.cc_ansible.subp", return_value=("stdout", "stderr")
- )
+ @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr"))
+ @mock.patch(M_PATH + "which", return_value=False)
+ def test_pip_bootstrap(self, m_which, m_subp):
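+        """assert pip is installed via distro packages when import fails"""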
+ distro = get_cloud(mocked_distro=True).distro
+ with mock.patch("builtins.__import__", side_effect=ImportError):
+ cc_ansible.AnsiblePullPip(distro, "ansible").install("")
+ distro.install_packages.assert_called_once()
+
+ @mock.patch(M_PATH + "which", return_value=True)
+ @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr"))
+ @mock.patch("cloudinit.distros.subp", return_value=("stdout", "stderr"))
@mark.parametrize(
("cfg", "expected"),
(
(
- CFG_FULL,
+ CFG_FULL_PULL,
[
"ansible-pull",
"--url=https://github/holmanb/vmboot",
@@ -318,45 +381,59 @@ class TestAnsible:
),
),
)
- def test_ansible_pull(self, m_subp, m_which, cfg, expected):
- pull_type = cfg["ansible"]["install-method"]
- ansible_pull = (
- cc_ansible.AnsiblePullPip()
- if pull_type == "pip"
- else cc_ansible.AnsiblePullDistro(get_cloud().distro)
- )
+ def test_ansible_pull(self, m_subp1, m_subp2, m_which, cfg, expected):
+ """verify expected ansible invocation from userdata config"""
+ pull_type = cfg["ansible"]["install_method"]
+ distro = get_cloud().distro
+ with mock.patch.dict(M_PATH + "os.environ", clear=True):
+ ansible_pull = (
+ cc_ansible.AnsiblePullPip(distro, "ansible")
+ if pull_type == "pip"
+ else cc_ansible.AnsiblePullDistro(distro)
+ )
cc_ansible.run_ansible_pull(
ansible_pull, deepcopy(cfg["ansible"]["pull"])
)
- assert m_subp.call_args[0][0] == expected
- @mock.patch("cloudinit.config.cc_ansible.validate_config")
+ if pull_type != "pip":
+ assert m_subp2.call_args[0][0] == expected
+ assert m_subp2.call_args[1]["env"].get("HOME") == environ.get(
+ "HOME"
+ )
+
+ @mock.patch(M_PATH + "validate_config")
def test_do_not_run(self, m_validate):
- cc_ansible.handle("", {}, None, None, None) # pyright: ignore
+ """verify that if ansible key not included, don't do anything"""
+ cc_ansible.handle("", {}, get_cloud(), None, None) # pyright: ignore
assert not m_validate.called
@mock.patch(
- "cloudinit.config.cc_ansible.subp",
- side_effect=[
- (distro_version, ""),
- (pip_version, ""),
- (" ansible 2.1.0", ""),
- (" ansible 2.1.0", ""),
- ],
+ "cloudinit.config.cc_ansible.subp", side_effect=[(distro_version, "")]
)
- def test_parse_version(self, m_subp):
+ def test_parse_version_distro(self, m_subp):
+ """Verify that the expected version is returned"""
assert cc_ansible.AnsiblePullDistro(
get_cloud().distro
- ).get_version() == cc_ansible.Version(2, 10, 8)
- assert cc_ansible.AnsiblePullPip().get_version() == cc_ansible.Version(
- 2, 13, 2
- )
+ ).get_version() == util.Version(2, 10, 8)
- assert (
- util.Version(2, 1, 0, -1)
- == cc_ansible.AnsiblePullPip().get_version()
- )
- assert (
- util.Version(2, 1, 0, -1)
- == cc_ansible.AnsiblePullDistro(get_cloud().distro).get_version()
- )
+ @mock.patch("cloudinit.subp.subp", side_effect=[(pip_version, "")])
+ def test_parse_version_pip(self, m_subp):
+ """Verify that the expected version is returned"""
+ distro = get_cloud().distro
+ distro.do_as = MagicMock(return_value=(pip_version, ""))
+ pip = cc_ansible.AnsiblePullPip(distro, "root")
+ received = pip.get_version()
+ expected = util.Version(2, 13, 2)
+ assert received == expected
+
+ @mock.patch(M_PATH + "subp", return_value=("stdout", "stderr"))
+ @mock.patch(M_PATH + "which", return_value=True)
+ def test_ansible_env_var(self, m_which, m_subp):
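+        """assert ansible_config path is passed through subp's env"""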
+ cc_ansible.handle("", CFG_FULL_PULL, get_cloud(), mock.Mock(), [])
+
+        # Python 3.8+ is required for the Mock.call_args.kwargs dict attribute
+ if isinstance(m_subp.call_args.kwargs, dict):
+ assert (
+ "/etc/ansible/ansible.cfg"
+ == m_subp.call_args.kwargs["env"]["ansible_config"]
+ )
diff --git a/tests/unittests/config/test_cc_bootcmd.py b/tests/unittests/config/test_cc_bootcmd.py
index 34b16b85..9831d25e 100644
--- a/tests/unittests/config/test_cc_bootcmd.py
+++ b/tests/unittests/config/test_cc_bootcmd.py
@@ -18,7 +18,7 @@ from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
-class FakeExtendedTempFile(object):
+class FakeExtendedTempFile:
def __init__(self, suffix):
self.suffix = suffix
self.handle = tempfile.NamedTemporaryFile(
diff --git a/tests/unittests/config/test_cc_chef.py b/tests/unittests/config/test_cc_chef.py
index f86be293..606eada4 100644
--- a/tests/unittests/config/test_cc_chef.py
+++ b/tests/unittests/config/test_cc_chef.py
@@ -5,8 +5,8 @@ import logging
import os
import re
-import httpretty
import pytest
+import responses
from cloudinit import util
from cloudinit.config import cc_chef
@@ -17,44 +17,38 @@ from cloudinit.config.schema import (
)
from tests.unittests.helpers import (
FilesystemMockingTestCase,
- HttprettyTestCase,
+ ResponsesTestCase,
cloud_init_project_dir,
mock,
skipIf,
skipUnlessJsonSchema,
)
-from tests.unittests.util import get_cloud
+from tests.unittests.util import MockDistro, get_cloud
LOG = logging.getLogger(__name__)
CLIENT_TEMPL = cloud_init_project_dir("templates/chef_client.rb.tmpl")
-# This is adjusted to use http because using with https causes issue
-# in some openssl/httpretty combinations.
-# https://github.com/gabrielfalcao/HTTPretty/issues/242
-# We saw issue in opensuse 42.3 with
-# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1
-OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:")
-
-class TestInstallChefOmnibus(HttprettyTestCase):
+class TestInstallChefOmnibus(ResponsesTestCase):
def setUp(self):
super(TestInstallChefOmnibus, self).setUp()
self.new_root = self.tmp_dir()
- @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
+ @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", cc_chef.OMNIBUS_URL)
def test_install_chef_from_omnibus_runs_chef_url_content(self):
"""install_chef_from_omnibus calls subp_blob_in_tempfile."""
response = b'#!/bin/bash\necho "Hi Mom"'
- httpretty.register_uri(
- httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200
+ self.responses.add(
+ responses.GET, cc_chef.OMNIBUS_URL, body=response, status=200
)
ret = (None, None) # stdout, stderr but capture=False
+ distro = mock.Mock()
with mock.patch(
"cloudinit.config.cc_chef.subp_blob_in_tempfile", return_value=ret
) as m_subp_blob:
- cc_chef.install_chef_from_omnibus()
+ cc_chef.install_chef_from_omnibus(distro=distro)
# admittedly whitebox, but assuming subp_blob_in_tempfile works
# this should be fine.
self.assertEqual(
@@ -64,6 +58,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
args=[],
basename="chef-omnibus-install",
capture=False,
+ distro=distro,
)
],
m_subp_blob.call_args_list,
@@ -74,20 +69,21 @@ class TestInstallChefOmnibus(HttprettyTestCase):
def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl):
"""install_chef_from_omnibus retries OMNIBUS_URL upon failure."""
- class FakeURLResponse(object):
+ class FakeURLResponse:
contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format(
self.new_root
)
m_rdurl.return_value = FakeURLResponse()
- cc_chef.install_chef_from_omnibus()
+ distro = mock.Mock()
+ cc_chef.install_chef_from_omnibus(distro=distro)
expected_kwargs = {
"retries": cc_chef.OMNIBUS_URL_RETRIES,
"url": cc_chef.OMNIBUS_URL,
}
self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
- cc_chef.install_chef_from_omnibus(retries=10)
+ cc_chef.install_chef_from_omnibus(retries=10, distro=distro)
expected_kwargs = {"retries": 10, "url": cc_chef.OMNIBUS_URL}
self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
expected_subp_kwargs = {
@@ -95,21 +91,21 @@ class TestInstallChefOmnibus(HttprettyTestCase):
"basename": "chef-omnibus-install",
"blob": m_rdurl.return_value.contents,
"capture": False,
+ "distro": distro,
}
self.assertCountEqual(
expected_subp_kwargs, m_subp_blob.call_args_list[0][1]
)
- @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
+ @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", cc_chef.OMNIBUS_URL)
@mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile")
def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob):
"""install_chef_from_omnibus provides version arg to OMNIBUS_URL."""
chef_outfile = self.tmp_path("chef.out", self.new_root)
response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile)
- httpretty.register_uri(
- httpretty.GET, cc_chef.OMNIBUS_URL, body=response
- )
- cc_chef.install_chef_from_omnibus(omnibus_version="2.0")
+ self.responses.add(responses.GET, cc_chef.OMNIBUS_URL, body=response)
+ distro = mock.Mock()
+ cc_chef.install_chef_from_omnibus(distro=distro, omnibus_version="2.0")
called_kwargs = m_subp_blob.call_args_list[0][1]
expected_kwargs = {
@@ -117,6 +113,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
"basename": "chef-omnibus-install",
"blob": response,
"capture": False,
+ "distro": distro,
}
self.assertCountEqual(expected_kwargs, called_kwargs)
@@ -461,4 +458,33 @@ class TestBootCMDSchema:
validate_cloudconfig_schema(config, schema, strict=True)
-# vi: ts=4 expandtab
+class TestHelpers:
+ def test_subp_blob_in_tempfile(self, mocker, tmpdir):
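+        """assert the blob is written to a tempfile which is then executed"""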
+ mocker.patch(
+ "tests.unittests.util.MockDistro.get_tmp_exec_path",
+ return_value=tmpdir,
+ )
+ mocker.patch("cloudinit.temp_utils.mkdtemp", return_value=tmpdir)
+ write_file = mocker.patch("cloudinit.util.write_file")
+ m_subp = mocker.patch("cloudinit.config.cc_chef.subp.subp")
+ distro = MockDistro()
+
+ cc_chef.subp_blob_in_tempfile("hi", distro, args=[])
+ assert m_subp.call_args == mock.call(args=[f"{tmpdir}/subp_blob"])
+ assert write_file.call_args[0][1] == "hi"
+
+ def test_subp_blob_in_tempfile_args(self, mocker, tmpdir):
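+        """assert extra args are appended after the tempfile path"""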
+ mocker.patch(
+ "tests.unittests.util.MockDistro.get_tmp_exec_path",
+ return_value=tmpdir,
+ )
+ mocker.patch("cloudinit.temp_utils.mkdtemp", return_value=tmpdir)
+ write_file = mocker.patch("cloudinit.util.write_file")
+ m_subp = mocker.patch("cloudinit.config.cc_chef.subp.subp")
+ distro = MockDistro()
+
+ cc_chef.subp_blob_in_tempfile("hi", distro, args=["aaa"])
+ assert m_subp.call_args == mock.call(
+ args=[f"{tmpdir}/subp_blob", "aaa"]
+ )
+ assert write_file.call_args[0][1] == "hi"
diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py
index c61a26f3..9c02a651 100644
--- a/tests/unittests/config/test_cc_disk_setup.py
+++ b/tests/unittests/config/test_cc_disk_setup.py
@@ -263,12 +263,12 @@ class TestMkfsCommandHandling(CiTestCase):
subp.assert_called_once_with(
[
"/sbin/mkfs.ext4",
- "/dev/xdb1",
"-L",
"without_cmd",
"-F",
"are",
"added",
+ "/dev/xdb1",
],
shell=False,
)
@@ -292,7 +292,7 @@ class TestMkfsCommandHandling(CiTestCase):
m_which.call_args_list,
)
subp.assert_called_once_with(
- ["/sbin/mkswap", "/dev/xdb1", "-L", "swap", "-f"], shell=False
+ ["/sbin/mkswap", "-L", "swap", "-f", "/dev/xdb1"], shell=False
)
diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py
index f4d4e579..c808333b 100644
--- a/tests/unittests/config/test_cc_growpart.py
+++ b/tests/unittests/config/test_cc_growpart.py
@@ -101,7 +101,7 @@ class TestDisabled(unittest.TestCase):
def setUp(self):
super(TestDisabled, self).setUp()
self.name = "growpart"
- self.cloud_init = None
+ self.cloud = None
self.log = logging.getLogger("TestDisabled")
self.args = []
@@ -114,9 +114,7 @@ class TestDisabled(unittest.TestCase):
config = {"growpart": {"mode": "off"}}
with mock.patch.object(cc_growpart, "resizer_factory") as mockobj:
- self.handle(
- self.name, config, self.cloud_init, self.log, self.args
- )
+ self.handle(self.name, config, self.cloud, self.log, self.args)
self.assertEqual(mockobj.call_count, 0)
@@ -125,11 +123,11 @@ class TestConfig(TestCase):
super(TestConfig, self).setUp()
self.name = "growpart"
self.paths = None
- self.cloud = cloud.Cloud(None, self.paths, None, None, None)
+ self.distro = mock.Mock()
+ self.cloud = cloud.Cloud(None, self.paths, None, self.distro, None)
self.log = logging.getLogger("TestConfig")
self.args = []
- self.cloud_init = None
self.handle = cc_growpart.handle
self.tmppath = "/tmp/cloudinit-test-file"
self.tmpdir = os.scandir("/tmp")
@@ -146,9 +144,7 @@ class TestConfig(TestCase):
) as mockobj:
config = {"growpart": {"mode": "auto"}}
- self.handle(
- self.name, config, self.cloud_init, self.log, self.args
- )
+ self.handle(self.name, config, self.cloud, self.log, self.args)
mockobj.assert_has_calls(
[
@@ -170,7 +166,7 @@ class TestConfig(TestCase):
self.handle,
self.name,
config,
- self.cloud_init,
+ self.cloud,
self.log,
self.args,
)
@@ -184,7 +180,7 @@ class TestConfig(TestCase):
with mock.patch.object(
subp, "subp", return_value=(HELP_GROWPART_RESIZE, "")
) as mockobj:
- ret = cc_growpart.resizer_factory(mode="auto")
+ ret = cc_growpart.resizer_factory(mode="auto", distro=mock.Mock())
self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
mockobj.assert_called_once_with(
@@ -210,7 +206,7 @@ class TestConfig(TestCase):
subp, "subp", return_value=(HELP_GROWPART_RESIZE, "")
) as mockobj:
- ret = cc_growpart.resizer_factory(mode="auto")
+ ret = cc_growpart.resizer_factory(mode="auto", distro=mock.Mock())
self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
diskdev = "/dev/sdb"
partnum = 1
@@ -234,7 +230,7 @@ class TestConfig(TestCase):
with mock.patch.object(
subp, "subp", return_value=("", HELP_GPART)
) as mockobj:
- ret = cc_growpart.resizer_factory(mode="auto")
+ ret = cc_growpart.resizer_factory(mode="auto", distro=mock.Mock())
self.assertIsInstance(ret, cc_growpart.ResizeGpart)
mockobj.assert_has_calls(
@@ -275,9 +271,9 @@ class TestConfig(TestCase):
)
)
- self.handle(self.name, {}, self.cloud_init, self.log, self.args)
+ self.handle(self.name, {}, self.cloud, self.log, self.args)
- factory.assert_called_once_with("auto")
+ factory.assert_called_once_with("auto", self.distro)
rsdevs.assert_called_once_with(myresizer, ["/"])
@@ -308,7 +304,7 @@ class TestResize(unittest.TestCase):
real_stat = os.stat
resize_calls = []
- class myresizer(object):
+ class myresizer:
def resize(self, diskdev, partnum, partdev):
resize_calls.append((diskdev, partnum, partdev))
if partdev == "/dev/YYda2":
@@ -588,7 +584,7 @@ def simple_device_part_info(devpath):
return x
-class Bunch(object):
+class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
diff --git a/tests/unittests/config/test_cc_lxd.py b/tests/unittests/config/test_cc_lxd.py
index 8b75a1f7..184b586e 100644
--- a/tests/unittests/config/test_cc_lxd.py
+++ b/tests/unittests/config/test_cc_lxd.py
@@ -14,27 +14,27 @@ from cloudinit.config.schema import (
from tests.unittests import helpers as t_help
from tests.unittests.util import get_cloud
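+# (storage_backend, required command, distro package) triples shared by tests.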
+BACKEND_DEF = (
+ ("zfs", "zfs", "zfsutils-linux"),
+ ("btrfs", "mkfs.btrfs", "btrfs-progs"),
+ ("lvm", "lvcreate", "lvm2"),
+ ("dir", None, None),
+)
+LXD_INIT_CFG = {
+ "lxd": {
+ "init": {
+ "network_address": "0.0.0.0",
+ "storage_backend": "zfs",
+ "storage_pool": "poolname",
+ }
+ }
+}
+
class TestLxd(t_help.CiTestCase):
with_logs = True
- lxd_cfg = {
- "lxd": {
- "init": {
- "network_address": "0.0.0.0",
- "storage_backend": "zfs",
- "storage_pool": "poolname",
- }
- }
- }
- backend_def = (
- ("zfs", "zfs", "zfsutils-linux"),
- ("btrfs", "mkfs.btrfs", "btrfs-progs"),
- ("lvm", "lvcreate", "lvm2"),
- ("dir", None, None),
- )
-
@mock.patch("cloudinit.config.cc_lxd.util.system_info")
@mock.patch("cloudinit.config.cc_lxd.os.path.exists", return_value=True)
@mock.patch("cloudinit.config.cc_lxd.subp.subp", return_value=True)
@@ -47,8 +47,8 @@ class TestLxd(t_help.CiTestCase):
cc = get_cloud(mocked_distro=True)
install = cc.distro.install_packages
- for backend, cmd, package in self.backend_def:
- lxd_cfg = deepcopy(self.lxd_cfg)
+ for backend, cmd, package in BACKEND_DEF:
+ lxd_cfg = deepcopy(LXD_INIT_CFG)
lxd_cfg["lxd"]["init"]["storage_backend"] = backend
subp.call_args_list = []
install.call_args_list = []
@@ -94,27 +94,16 @@ class TestLxd(t_help.CiTestCase):
else:
self.assertEqual([], exists.call_args_list)
- @mock.patch("cloudinit.config.cc_lxd.subp.which", return_value=False)
- def test_lxd_package_install(self, m_which):
- for backend, _, package in self.backend_def:
- lxd_cfg = deepcopy(self.lxd_cfg)
- lxd_cfg["lxd"]["init"]["storage_backend"] = backend
-
- packages = cc_lxd.get_required_packages(lxd_cfg["lxd"]["init"])
- assert "lxd" in packages
- if package:
- assert package in packages
-
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.subp")
def test_lxd_install(self, mock_subp, m_maybe_clean):
cc = get_cloud()
cc.distro = mock.MagicMock()
mock_subp.which.return_value = None
- cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
+ cc_lxd.handle("cc_lxd", LXD_INIT_CFG, cc, self.logger, [])
self.assertNotIn("WARN", self.logs.getvalue())
self.assertTrue(cc.distro.install_packages.called)
- cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
+ cc_lxd.handle("cc_lxd", LXD_INIT_CFG, cc, self.logger, [])
self.assertFalse(m_maybe_clean.called)
install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
self.assertEqual(sorted(install_pkg), ["lxd", "zfsutils-linux"])
@@ -139,6 +128,25 @@ class TestLxd(t_help.CiTestCase):
self.assertFalse(mock_subp.subp.called)
self.assertFalse(m_maybe_clean.called)
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_preseed(self, mock_subp):
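+        """preseed config waits for lxd then runs lxd init --preseed"""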
+ cc = get_cloud()
+ cc.distro = mock.MagicMock()
+ cc_lxd.handle(
+ "cc_lxd",
+ {"lxd": {"preseed": '{"chad": True}'}},
+ cc,
+ self.logger,
+ [],
+ )
+ self.assertEqual(
+ [
+ mock.call(["lxd", "waitready", "--timeout=300"]),
+ mock.call(["lxd", "init", "--preseed"], data='{"chad": True}'),
+ ],
+ mock_subp.subp.call_args_list,
+ )
+
def test_lxd_debconf_new_full(self):
data = {
"mode": "new",
@@ -330,11 +338,42 @@ class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
)
+class TestGetRequiredPackages:
+ @pytest.mark.parametrize(
+ "storage_type, cmd, preseed, package",
+ (
+ ("zfs", "zfs", "", "zfsutils-linux"),
+ ("btrfs", "mkfs.btrfs", "", "btrfs-progs"),
+ ("lvm", "lvcreate", "", "lvm2"),
+ ("lvm", "lvcreate", "storage_pools: [{driver: lvm}]", "lvm2"),
+ ("dir", None, "", None),
+ ),
+ )
+ @mock.patch("cloudinit.config.cc_lxd.subp.which", return_value=False)
+ def test_lxd_package_install(
+ self, m_which, storage_type, cmd, preseed, package
+ ):
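+        """assert lxd and backend package required when cmds are missing"""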
+ if preseed: # preseed & lxd.init mutually exclusive
+ init_cfg = {}
+ else:
+ lxd_cfg = deepcopy(LXD_INIT_CFG)
+ lxd_cfg["lxd"]["init"]["storage_backend"] = storage_type
+ init_cfg = lxd_cfg["lxd"]["init"]
+
+ packages = cc_lxd.get_required_packages(init_cfg, preseed)
+ assert "lxd" in packages
+ which_calls = [mock.call("lxd")]
+ if package:
+ which_calls.append(mock.call(cmd))
+ assert package in packages
+ assert which_calls == m_which.call_args_list
+
+
class TestLXDSchema:
@pytest.mark.parametrize(
"config, error_msg",
[
- # Only allow init and bridge keys
+ # Only allow init, bridge and preseed keys
({"lxd": {"bridgeo": 1}}, "Additional properties are not allowed"),
# Only allow init.storage_backend values zfs and dir
(
@@ -347,7 +386,11 @@ class TestLXDSchema:
# Require bridge.mode
({"lxd": {"bridge": {}}}, "bridge: 'mode' is a required property"),
# Require init or bridge keys
- ({"lxd": {}}, "does not have enough properties"),
+ ({"lxd": {}}, "lxd: {} does not have enough properties"),
+ # Require some non-empty preseed config of type string
+ ({"lxd": {"preseed": {}}}, "not of type 'string'"),
+ ({"lxd": {"preseed": ""}}, None),
+ ({"lxd": {"preseed": "this is {} opaque"}}, None),
# Allow a valid bridge.mode with optional mtu
({"lxd": {"bridge": {"mode": "new", "mtu": 9000}}}, None),
# LXD's default value
@@ -371,5 +414,64 @@ class TestLXDSchema:
else:
validate_cloudconfig_schema(config, get_schema(), strict=True)
+ @pytest.mark.parametrize(
+ "init_cfg, bridge_cfg, preseed_str, error_expectation",
+ (
+ pytest.param(
+ {}, {}, "", t_help.does_not_raise(), id="empty_cfgs_no_errors"
+ ),
+ pytest.param(
+ {"init-cfg": 1},
+ {"bridge-cfg": 2},
+ "",
+ t_help.does_not_raise(),
+ id="cfg_init_and_bridge_allowed",
+ ),
+ pytest.param(
+ {},
+ {},
+ "profiles: []",
+ t_help.does_not_raise(),
+ id="cfg_preseed_allowed_without_bridge_or_init",
+ ),
+ pytest.param(
+ {"init-cfg": 1},
+ {"bridge-cfg": 2},
+ "profiles: []",
+ pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Unable to configure LXD. lxd.preseed config can not"
+ " be provided with key(s): lxd.init, lxd.bridge"
+ ),
+ ),
+ ),
+ pytest.param(
+ "nope",
+ {},
+ "",
+ pytest.raises(
+ ValueError,
+ match=re.escape(
+ "lxd.init config must be a dictionary. found a 'str'"
+ ),
+ ),
+ ),
+ ),
+ )
+ def test_supplemental_schema_validation_raises_value_error(
+ self, init_cfg, bridge_cfg, preseed_str, error_expectation
+ ):
+ """LXD is strict on invalid user-data raising conspicuous ValueErrors
+ cc_lxd.supplemental_schema_validation
+
+ Hard errors result is faster triage/awareness of config problems than
+ warnings do.
+ """
+ with error_expectation:
+ cc_lxd.supplemental_schema_validation(
+ init_cfg, bridge_cfg, preseed_str
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ntp.py b/tests/unittests/config/test_cc_ntp.py
index 41b5fb9b..365945f8 100644
--- a/tests/unittests/config/test_cc_ntp.py
+++ b/tests/unittests/config/test_cc_ntp.py
@@ -372,6 +372,10 @@ class TestNtp(FilesystemMockingTestCase):
valid_empty_configs = [{"ntp": {}}, {"ntp": None}]
for valid_empty_config in valid_empty_configs:
for distro in cc_ntp.distros:
+            # Skip COS, since COS installs a default config file, and
+            # carry on with the remaining distros.
+            if distro == "cos":
+                continue
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
confpath = ntpconfig["confpath"]
@@ -426,14 +430,15 @@ class TestNtp(FilesystemMockingTestCase):
cc_ntp.handle("notimportant", cfg, mycloud, None, None)
self.assertEqual(0, m_select.call_count)
- @mock.patch("cloudinit.distros.subp")
- @mock.patch("cloudinit.config.cc_ntp.subp")
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.subp.which", return_value=True)
@mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
@mock.patch("cloudinit.distros.Distro.uses_systemd")
- def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp):
+ def test_ntp_the_whole_package(self, m_sysd, m_select, m_which, m_subp):
"""Test enabled config renders template, and restarts service"""
cfg = {"ntp": {"enabled": True}}
for distro in cc_ntp.distros:
+ m_subp.reset_mock()
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
confpath = ntpconfig["confpath"]
@@ -442,6 +447,8 @@ class TestNtp(FilesystemMockingTestCase):
hosts = cc_ntp.generate_server_names(mycloud.distro.name)
uses_systemd = True
+ is_FreeBSD = False
+ is_OpenBSD = False
expected_service_call = [
"systemctl",
"reload-or-restart",
@@ -449,28 +456,54 @@ class TestNtp(FilesystemMockingTestCase):
]
expected_content = "servers []\npools {0}\n".format(hosts)
+            # Skip COS, since COS installs a default config file, and
+            # carry on with the remaining distros.
+            if distro == "cos":
+                continue
+
if distro == "alpine":
uses_systemd = False
- expected_service_call = ["rc-service", service_name, "restart"]
+ expected_service_call = [
+ "rc-service",
+ "--nocolor",
+ service_name,
+ "restart",
+ ]
# _mock_ntp_client_config call above did not specify a client
# value and so it defaults to "ntp" which on Alpine Linux only
# supports servers and not pools.
expected_content = "servers {0}\npools []\n".format(hosts)
+ if distro == "freebsd":
+ uses_systemd = False
+ is_FreeBSD = True
+ if service_name != "ntpd":
+ expected_service_call = ["service", "ntpd", "disable"]
+ else:
+ expected_service_call = [
+ "service",
+ service_name,
+ "restart",
+ ]
+
+ if distro == "openbsd":
+ uses_systemd = False
+ is_OpenBSD = True
+ expected_service_call = ["rcctl", "restart", service_name]
+
m_sysd.return_value = uses_systemd
with mock.patch("cloudinit.config.cc_ntp.util") as m_util:
# allow use of util.mergemanydict
m_util.mergemanydict.side_effect = util.mergemanydict
- # default client is present
- m_subp.which.return_value = True
# use the config 'enabled' value
m_util.is_false.return_value = util.is_false(
cfg["ntp"]["enabled"]
)
+ m_util.is_BSD.return_value = is_FreeBSD or is_OpenBSD
+ m_util.is_FreeBSD.return_value = is_FreeBSD
+ m_util.is_OpenBSD.return_value = is_OpenBSD
cc_ntp.handle("notimportant", cfg, mycloud, None, None)
- m_dsubp.subp.assert_called_with(
- expected_service_call, capture=True
- )
+ m_subp.assert_called_with(expected_service_call, capture=True)
self.assertEqual(expected_content, util.load_file(confpath))
diff --git a/tests/unittests/config/test_cc_power_state_change.py b/tests/unittests/config/test_cc_power_state_change.py
index 82824306..81750f5b 100644
--- a/tests/unittests/config/test_cc_power_state_change.py
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -86,12 +86,16 @@ class TestLoadPowerState(t_help.TestCase):
self.assertEqual(cond, True)
def test_freebsd_poweroff_uses_lowercase_p(self):
- cls = distros.fetch("freebsd")
- paths = helpers.Paths({})
- freebsd = cls("freebsd", {}, paths)
- cfg = {"power_state": {"mode": "poweroff"}}
- ret = psc.load_power_state(cfg, freebsd)
- self.assertIn("-p", ret[0])
+ with mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=("", None),
+ ):
+ cls = distros.fetch("freebsd")
+ paths = helpers.Paths({})
+ freebsd = cls("freebsd", {}, paths)
+ cfg = {"power_state": {"mode": "poweroff"}}
+ ret = psc.load_power_state(cfg, freebsd)
+ self.assertIn("-p", ret[0])
def test_alpine_delay(self):
# alpine takes delay in seconds.
diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py
index 72e031ba..27a49722 100644
--- a/tests/unittests/config/test_cc_puppet.py
+++ b/tests/unittests/config/test_cc_puppet.py
@@ -3,6 +3,7 @@ import logging
import textwrap
import pytest
+import responses
from cloudinit import util
from cloudinit.config import cc_puppet
@@ -11,17 +12,19 @@ from cloudinit.config.schema import (
get_schema,
validate_cloudconfig_schema,
)
-from tests.unittests.helpers import (
- CiTestCase,
- HttprettyTestCase,
- mock,
- skipUnlessJsonSchema,
-)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
+@pytest.fixture
+def fake_tempdir(mocker, tmpdir):
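+ # Make cc_puppet's temp_utils.tempdir context manager yield pytest's
+ # tmpdir, so the module's temp files land in an isolated directory.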
+ mocker.patch(
+ "cloudinit.config.cc_puppet.temp_utils.tempdir"
+ ).return_value.__enter__.return_value = str(tmpdir)
+
+
@mock.patch("cloudinit.config.cc_puppet.subp.which")
@mock.patch("cloudinit.config.cc_puppet.subp.subp")
@mock.patch("cloudinit.config.cc_puppet.os")
@@ -143,11 +146,13 @@ class TestPuppetHandle(CiTestCase):
def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio'."""
-
- self.cloud.distro = mock.MagicMock()
+ distro = mock.MagicMock()
+ self.cloud.distro = distro
cfg = {"puppet": {"install": True, "install_type": "aio"}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- m_aio.assert_called_with(cc_puppet.AIO_INSTALL_URL, None, None, True)
+ m_aio.assert_called_with(
+ distro, cc_puppet.AIO_INSTALL_URL, None, None, True
+ )
@mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
@@ -156,8 +161,8 @@ class TestPuppetHandle(CiTestCase):
):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio' and 'version' is specified."""
-
- self.cloud.distro = mock.MagicMock()
+ distro = mock.MagicMock()
+ self.cloud.distro = distro
cfg = {
"puppet": {
"install": True,
@@ -167,7 +172,7 @@ class TestPuppetHandle(CiTestCase):
}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
m_aio.assert_called_with(
- cc_puppet.AIO_INSTALL_URL, "6.24.0", None, True
+ distro, cc_puppet.AIO_INSTALL_URL, "6.24.0", None, True
)
@mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
@@ -177,8 +182,8 @@ class TestPuppetHandle(CiTestCase):
):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio' and 'collection' is specified."""
-
- self.cloud.distro = mock.MagicMock()
+ distro = mock.MagicMock()
+ self.cloud.distro = distro
cfg = {
"puppet": {
"install": True,
@@ -188,7 +193,7 @@ class TestPuppetHandle(CiTestCase):
}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
m_aio.assert_called_with(
- cc_puppet.AIO_INSTALL_URL, None, "puppet6", True
+ distro, cc_puppet.AIO_INSTALL_URL, None, "puppet6", True
)
@mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
@@ -198,8 +203,8 @@ class TestPuppetHandle(CiTestCase):
):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio' and 'aio_install_url' is specified."""
-
- self.cloud.distro = mock.MagicMock()
+ distro = mock.MagicMock()
+ self.cloud.distro = distro
cfg = {
"puppet": {
"install": True,
@@ -209,7 +214,7 @@ class TestPuppetHandle(CiTestCase):
}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
m_aio.assert_called_with(
- "http://test.url/path/to/script.sh", None, None, True
+ distro, "http://test.url/path/to/script.sh", None, None, True
)
@mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
@@ -219,8 +224,8 @@ class TestPuppetHandle(CiTestCase):
):
"""Cloud-config with 'puppet' key installs
when 'install_type' is 'aio' and no cleanup."""
-
- self.cloud.distro = mock.MagicMock()
+ distro = mock.MagicMock()
+ self.cloud.distro = distro
cfg = {
"puppet": {
"install": True,
@@ -229,7 +234,9 @@ class TestPuppetHandle(CiTestCase):
}
}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- m_aio.assert_called_with(cc_puppet.AIO_INSTALL_URL, None, None, False)
+ m_aio.assert_called_with(
+ distro, cc_puppet.AIO_INSTALL_URL, None, None, False
+ )
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
def test_puppet_config_installs_puppet_version(self, m_subp, _):
@@ -402,68 +409,82 @@ URL_MOCK = mock.Mock()
URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"'
+@pytest.mark.usefixtures("fake_tempdir")
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=(None, None))
@mock.patch(
"cloudinit.config.cc_puppet.url_helper.readurl",
return_value=URL_MOCK,
autospec=True,
)
-class TestInstallPuppetAio(HttprettyTestCase):
- def test_install_with_default_arguments(self, m_readurl, m_subp):
- """Install AIO with no arguments"""
- cc_puppet.install_puppet_aio()
-
- self.assertEqual(
- [mock.call([mock.ANY, "--cleanup"], capture=False)],
- m_subp.call_args_list,
- )
-
- def test_install_with_custom_url(self, m_readurl, m_subp):
- """Install AIO from custom URL"""
- cc_puppet.install_puppet_aio("http://custom.url/path/to/script.sh")
- m_readurl.assert_called_with(
- url="http://custom.url/path/to/script.sh", retries=5
- )
-
- self.assertEqual(
- [mock.call([mock.ANY, "--cleanup"], capture=False)],
- m_subp.call_args_list,
- )
-
- def test_install_with_version(self, m_readurl, m_subp):
- """Install AIO with specific version"""
- cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, "7.6.0")
-
- self.assertEqual(
- [mock.call([mock.ANY, "-v", "7.6.0", "--cleanup"], capture=False)],
- m_subp.call_args_list,
- )
-
- def test_install_with_collection(self, m_readurl, m_subp):
- """Install AIO with specific collection"""
- cc_puppet.install_puppet_aio(
- cc_puppet.AIO_INSTALL_URL, None, "puppet6-nightly"
- )
-
- self.assertEqual(
- [
- mock.call(
- [mock.ANY, "-c", "puppet6-nightly", "--cleanup"],
- capture=False,
- )
- ],
- m_subp.call_args_list,
- )
-
- def test_install_with_no_cleanup(self, m_readurl, m_subp):
- """Install AIO with no cleanup"""
- cc_puppet.install_puppet_aio(
- cc_puppet.AIO_INSTALL_URL, None, None, False
- )
-
- self.assertEqual(
- [mock.call([mock.ANY], capture=False)], m_subp.call_args_list
- )
+class TestInstallPuppetAio:
+ @pytest.mark.parametrize(
+ "args, expected_subp_call_args_list, expected_readurl_call_args_list",
+ [
+ pytest.param(
+ [],
+ [mock.call([mock.ANY, "--cleanup"], capture=False)],
+ [
+ mock.call(
+ url="https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh", # noqa: 501
+ retries=5,
+ )
+ ],
+ id="default_arguments",
+ ),
+ pytest.param(
+ ["http://custom.url/path/to/script.sh"],
+ [mock.call([mock.ANY, "--cleanup"], capture=False)],
+ [
+ mock.call(
+ url="http://custom.url/path/to/script.sh", retries=5
+ )
+ ],
+ id="custom_url",
+ ),
+ pytest.param(
+ [cc_puppet.AIO_INSTALL_URL, "7.6.0"],
+ [
+ mock.call(
+ [mock.ANY, "-v", "7.6.0", "--cleanup"], capture=False
+ )
+ ],
+ [mock.call(url=cc_puppet.AIO_INSTALL_URL, retries=5)],
+ id="version",
+ ),
+ pytest.param(
+ [cc_puppet.AIO_INSTALL_URL, None, "puppet6-nightly"],
+ [
+ mock.call(
+ [mock.ANY, "-c", "puppet6-nightly", "--cleanup"],
+ capture=False,
+ )
+ ],
+ [mock.call(url=cc_puppet.AIO_INSTALL_URL, retries=5)],
+ id="collection",
+ ),
+ pytest.param(
+ [cc_puppet.AIO_INSTALL_URL, None, None, False],
+ [mock.call([mock.ANY], capture=False)],
+ [mock.call(url=cc_puppet.AIO_INSTALL_URL, retries=5)],
+ id="no_cleanup",
+ ),
+ ],
+ )
+ @responses.activate
+ def test_install_puppet_aio(
+ self,
+ m_readurl,
+ m_subp,
+ args,
+ expected_subp_call_args_list,
+ expected_readurl_call_args_list,
+ tmpdir,
+ ):
+ distro = mock.Mock()
+ distro.get_tmp_exec_path.return_value = str(tmpdir)
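+ # install_puppet_aio creates its working directory under the distro's
+ # tmp exec path, so point that at pytest's tmpdir.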
+ cc_puppet.install_puppet_aio(distro, *args)
+ assert expected_readurl_call_args_list == m_readurl.call_args_list
+ assert expected_subp_call_args_list == m_subp.call_args_list
class TestPuppetSchema:
diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py
index 44659f7d..b46fab51 100644
--- a/tests/unittests/config/test_cc_resizefs.py
+++ b/tests/unittests/config/test_cc_resizefs.py
@@ -83,7 +83,7 @@ class TestResizefs(CiTestCase):
def test_handle_noops_on_disabled(self):
"""The handle function logs when the configuration disables resize."""
cfg = {"resize_rootfs": False}
- handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ handle("cc_resizefs", cfg, cloud=None, log=LOG, args=[])
self.assertIn(
"DEBUG: Skipping module named cc_resizefs, resizing disabled\n",
self.logs.getvalue(),
@@ -94,7 +94,7 @@ class TestResizefs(CiTestCase):
"""handle warns when get_mount_info sees unknown filesystem for /."""
m_get_mount_info.return_value = None
cfg = {"resize_rootfs": True}
- handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ handle("cc_resizefs", cfg, cloud=None, log=LOG, args=[])
logs = self.logs.getvalue()
self.assertNotIn(
"WARNING: Invalid cloud-config provided:\nresize_rootfs:", logs
@@ -128,7 +128,7 @@ class TestResizefs(CiTestCase):
handle,
"cc_resizefs",
cfg,
- _cloud=None,
+ cloud=None,
log=LOG,
args=[],
)
@@ -183,7 +183,7 @@ class TestResizefs(CiTestCase):
cfg = {"resize_rootfs": True}
with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
- handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ handle("cc_resizefs", cfg, cloud=None, log=LOG, args=[])
ret = dresize.call_args[0][0]
self.assertEqual(("zpool", "online", "-e", "vmzroot", disk), ret)
@@ -217,7 +217,7 @@ class TestResizefs(CiTestCase):
with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
with mock.patch("cloudinit.config.cc_resizefs.os.stat") as m_stat:
m_stat.side_effect = fake_stat
- handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ handle("cc_resizefs", cfg, cloud=None, log=LOG, args=[])
self.assertEqual(
("zpool", "online", "-e", "zroot", "/dev/" + disk),
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
index 10473c3b..be26103f 100644
--- a/tests/unittests/config/test_cc_set_passwords.py
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -357,7 +357,11 @@ class TestSetPasswordsHandle:
"""BSD don't use chpasswd"""
mocker.patch(f"{MODPATH}util.is_BSD", return_value=True)
m_subp = mocker.patch(f"{MODPATH}subp.subp")
- cloud = get_cloud(distro="freebsd")
+ # patch for ifconfig -a
+ with mock.patch(
+ "cloudinit.distros.networking.subp.subp", return_values=("", None)
+ ):
+ cloud = get_cloud(distro="freebsd")
cfg = {"chpasswd": user_cfg}
with mock.patch.object(
cloud.distro, "uses_systemd", return_value=False
diff --git a/tests/unittests/config/test_cc_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py
index 657bfe51..34038cca 100644
--- a/tests/unittests/config/test_cc_ubuntu_advantage.py
+++ b/tests/unittests/config/test_cc_ubuntu_advantage.py
@@ -1,15 +1,22 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import json
import logging
import re
+import sys
+from collections import namedtuple
import pytest
from cloudinit import subp
from cloudinit.config.cc_ubuntu_advantage import (
+ _attach,
+ _auto_attach,
+ _should_auto_attach,
configure_ua,
handle,
maybe_install_ua_tools,
- supplemental_schema_validation,
+ set_ua_config,
+ validate_schema_features,
)
from cloudinit.config.schema import (
SchemaValidationError,
@@ -23,6 +30,48 @@ from tests.unittests.util import get_cloud
MPATH = "cloudinit.config.cc_ubuntu_advantage"
+class FakeUserFacingError(Exception):
+ def __init__(self, msg: str):
+ self.msg = msg
+
+
+class FakeAlreadyAttachedError(FakeUserFacingError):
+ pass
+
+
+class FakeAlreadyAttachedOnPROError(FakeUserFacingError):
+ pass
+
+
+@pytest.fixture
+def fake_uaclient(mocker):
+ """Mocks `uaclient` module"""
+
+ mocker.patch.dict("sys.modules")
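+ # patch.dict snapshots sys.modules, so the fake modules injected
+ # below are removed again when each test finishes.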
+ m_uaclient = mock.Mock()
+
+ sys.modules["uaclient"] = m_uaclient
+
+ # Exceptions
+ _exceptions = namedtuple(
+ "exceptions",
+ [
+ "UserFacingError",
+ "AlreadyAttachedError",
+ ],
+ )(
+ FakeUserFacingError,
+ FakeAlreadyAttachedError,
+ )
+ sys.modules["uaclient.api.exceptions"] = _exceptions
+
+ # Messages
+ m_messages = mock.Mock()
+ m_messages.ALREADY_ENABLED.name = "service-already-enabled"
+ sys.modules["uaclient.messages"] = m_messages
+
+
+@pytest.mark.usefixtures("fake_uaclient")
@mock.patch(f"{MPATH}.subp.subp")
class TestConfigureUA:
def test_configure_ua_attach_error(self, m_subp):
@@ -33,7 +82,7 @@ class TestConfigureUA:
match = (
"Failure attaching Ubuntu Advantage:\nUnexpected error while"
" running command.\nCommand: -\nExit code: -\nReason: -\n"
- "Stdout: Invalid token SomeToken\nStderr: -"
+ "Stdout: Invalid token REDACTED\nStderr: -"
)
with pytest.raises(RuntimeError, match=match):
configure_ua(token="SomeToken")
@@ -44,12 +93,18 @@ class TestConfigureUA:
# When token is provided, attach the machine to ua using the token.
pytest.param(
{"token": "SomeToken"},
- [mock.call(["ua", "attach", "SomeToken"])],
+ [
+ mock.call(
+ ["ua", "attach", "SomeToken"],
+ logstring=["ua", "attach", "REDACTED"],
+ rcs={0, 2},
+ )
+ ],
[
(
MPATH,
logging.DEBUG,
- "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ "Attaching to Ubuntu Advantage. ua attach REDACTED",
)
],
id="with_token",
@@ -57,12 +112,18 @@ class TestConfigureUA:
# When services is an empty list, do not auto-enable attach.
pytest.param(
{"token": "SomeToken", "enable": []},
- [mock.call(["ua", "attach", "SomeToken"])],
+ [
+ mock.call(
+ ["ua", "attach", "SomeToken"],
+ logstring=["ua", "attach", "REDACTED"],
+ rcs={0, 2},
+ )
+ ],
[
(
MPATH,
logging.DEBUG,
- "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ "Attaching to Ubuntu Advantage. ua attach REDACTED",
)
],
id="with_empty_services",
@@ -71,16 +132,36 @@ class TestConfigureUA:
pytest.param(
{"token": "SomeToken", "enable": ["fips"]},
[
- mock.call(["ua", "attach", "SomeToken"]),
mock.call(
- ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ["ua", "attach", "--no-auto-enable", "SomeToken"],
+ logstring=[
+ "ua",
+ "attach",
+ "--no-auto-enable",
+ "REDACTED",
+ ],
+ rcs={0, 2},
+ ),
+ mock.call(
+ [
+ "ua",
+ "enable",
+ "--assume-yes",
+ "--format",
+ "json",
+ "--",
+ "fips",
+ ],
+ capture=True,
+ rcs={0, 1},
),
],
[
(
MPATH,
logging.DEBUG,
- "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ "Attaching to Ubuntu Advantage. ua attach"
+ " --no-auto-enable REDACTED",
)
],
id="with_specific_services",
@@ -89,16 +170,36 @@ class TestConfigureUA:
pytest.param(
{"token": "SomeToken", "enable": "fips"},
[
- mock.call(["ua", "attach", "SomeToken"]),
mock.call(
- ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ["ua", "attach", "--no-auto-enable", "SomeToken"],
+ logstring=[
+ "ua",
+ "attach",
+ "--no-auto-enable",
+ "REDACTED",
+ ],
+ rcs={0, 2},
+ ),
+ mock.call(
+ [
+ "ua",
+ "enable",
+ "--assume-yes",
+ "--format",
+ "json",
+ "--",
+ "fips",
+ ],
+ capture=True,
+ rcs={0, 1},
),
],
[
(
MPATH,
logging.DEBUG,
- "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ "Attaching to Ubuntu Advantage. ua attach"
+ " --no-auto-enable REDACTED",
),
(
MPATH,
@@ -112,12 +213,18 @@ class TestConfigureUA:
# When services is neither a string nor a list, warn but still attach
pytest.param(
{"token": "SomeToken", "enable": {"deffo": "wont work"}},
- [mock.call(["ua", "attach", "SomeToken"])],
+ [
+ mock.call(
+ ["ua", "attach", "SomeToken"],
+ logstring=["ua", "attach", "REDACTED"],
+ rcs={0, 2},
+ )
+ ],
[
(
MPATH,
logging.DEBUG,
- "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ "Attaching to Ubuntu Advantage. ua attach REDACTED",
),
(
MPATH,
@@ -134,130 +241,416 @@ class TestConfigureUA:
def test_configure_ua_attach(
self, m_subp, kwargs, call_args_list, log_record_tuples, caplog
):
+ m_subp.return_value = subp.SubpResult(json.dumps({"errors": []}), "")
configure_ua(**kwargs)
assert call_args_list == m_subp.call_args_list
for record_tuple in log_record_tuples:
assert record_tuple in caplog.record_tuples
+ def test_configure_ua_already_attached(self, m_subp, caplog):
+ """ua is already attached to an subscription"""
+ m_subp.rcs = 2
+ configure_ua(token="SomeToken")
+ assert m_subp.call_args_list == [
+ mock.call(
+ ["ua", "attach", "SomeToken"],
+ logstring=["ua", "attach", "REDACTED"],
+ rcs={0, 2},
+ )
+ ]
+ assert (
+ MPATH,
+ logging.DEBUG,
+ "Attaching to Ubuntu Advantage. ua attach REDACTED",
+ ) in caplog.record_tuples
+
+ def test_configure_ua_attach_on_service_enabled(
+ self, m_subp, caplog, fake_uaclient
+ ):
+ """retry enabling an already enabled service"""
+
+ def fake_subp(cmd, capture=None, rcs=None, logstring=None):
+ fail_cmds = [
+ "ua",
+ "enable",
+ "--assume-yes",
+ "--format",
+ "json",
+ "--",
+ "livepatch",
+ ]
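+ # Return the service-already-enabled payload only for the enable
+ # command; every other call implicitly returns None.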
+ if cmd == fail_cmds and capture:
+ response = {
+ "errors": [
+ {
+ "message": "Does not matter",
+ "message_code": "service-already-enabled",
+ "service": cmd[-1],
+ "type": "service",
+ }
+ ]
+ }
+ return subp.SubpResult(json.dumps(response), "")
+
+ m_subp.side_effect = fake_subp
+
+ configure_ua(token="SomeToken", enable=["livepatch"])
+ assert m_subp.call_args_list == [
+ mock.call(
+ ["ua", "attach", "--no-auto-enable", "SomeToken"],
+ logstring=["ua", "attach", "--no-auto-enable", "REDACTED"],
+ rcs={0, 2},
+ ),
+ mock.call(
+ [
+ "ua",
+ "enable",
+ "--assume-yes",
+ "--format",
+ "json",
+ "--",
+ "livepatch",
+ ],
+ capture=True,
+ rcs={0, 1},
+ ),
+ ]
+ assert (
+ MPATH,
+ logging.DEBUG,
+ "Service `livepatch` already enabled.",
+ ) in caplog.record_tuples
+
def test_configure_ua_attach_on_service_error(self, m_subp, caplog):
"""all services should be enabled and then any failures raised"""
- def fake_subp(cmd, capture=None):
- fail_cmds = [
- ["ua", "enable", "--assume-yes", svc] for svc in ["esm", "cc"]
+ def fake_subp(cmd, capture=None, rcs=None, logstring=None):
+ fail_cmd = [
+ "ua",
+ "enable",
+ "--assume-yes",
+ "--format",
+ "json",
+ "--",
]
- if cmd in fail_cmds and capture:
- svc = cmd[-1]
- raise subp.ProcessExecutionError(
- "Invalid {} credentials".format(svc.upper())
- )
+ if cmd[: len(fail_cmd)] == fail_cmd and capture:
+ response = {
+ "errors": [
+ {
+ "message": f"Invalid {svc} credentials",
+ "message_code": "some-code",
+ "service": svc,
+ "type": "service",
+ }
+ for svc in ["esm", "cc"]
+ ]
+ + [
+ {
+ "message": "Cannot enable unknown service 'asdf'",
+ "message_code": "invalid-service-or-failure",
+ "service": None,
+ "type": "system",
+ }
+ ]
+ }
+ return subp.SubpResult(json.dumps(response), "")
+ return subp.SubpResult(json.dumps({"errors": []}), "")
m_subp.side_effect = fake_subp
with pytest.raises(
RuntimeError,
match=re.escape(
- 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"'
+ "Failure enabling Ubuntu Advantage service(s): esm, cc"
),
):
- configure_ua(token="SomeToken", enable=["esm", "cc", "fips"])
+ configure_ua(
+ token="SomeToken", enable=["esm", "cc", "fips", "asdf"]
+ )
assert m_subp.call_args_list == [
- mock.call(["ua", "attach", "SomeToken"]),
- mock.call(["ua", "enable", "--assume-yes", "esm"], capture=True),
- mock.call(["ua", "enable", "--assume-yes", "cc"], capture=True),
- mock.call(["ua", "enable", "--assume-yes", "fips"], capture=True),
+ mock.call(
+ ["ua", "attach", "--no-auto-enable", "SomeToken"],
+ logstring=["ua", "attach", "--no-auto-enable", "REDACTED"],
+ rcs={0, 2},
+ ),
+ mock.call(
+ [
+ "ua",
+ "enable",
+ "--assume-yes",
+ "--format",
+ "json",
+ "--",
+ "esm",
+ "cc",
+ "fips",
+ "asdf",
+ ],
+ capture=True,
+ rcs={0, 1},
+ ),
]
assert (
MPATH,
logging.WARNING,
- 'Failure enabling "esm":\nUnexpected error'
- " while running command.\nCommand: -\nExit code: -\nReason: -\n"
- "Stdout: Invalid ESM credentials\nStderr: -",
+ "Failure enabling `esm`: Invalid esm credentials",
) in caplog.record_tuples
assert (
MPATH,
logging.WARNING,
- 'Failure enabling "cc":\nUnexpected error'
- " while running command.\nCommand: -\nExit code: -\nReason: -\n"
- "Stdout: Invalid CC credentials\nStderr: -",
+ "Failure enabling `cc`: Invalid cc credentials",
) in caplog.record_tuples
- assert 'Failure enabling "fips"' not in caplog.text
-
- def test_configure_ua_config_with_weird_params(self, m_subp, caplog):
- """When configs not string or list, warn but still attach"""
- configure_ua(
- token="SomeToken", config=["http_proxy=http://some-proxy.net:3128"]
- )
- assert [
- mock.call(["ua", "attach", "SomeToken"])
- ] == m_subp.call_args_list
assert (
MPATH,
logging.WARNING,
- "ubuntu_advantage: config should be a dict, not a"
- " list; skipping enabling config parameters",
- ) == caplog.record_tuples[-2]
- assert (
- MPATH,
- logging.DEBUG,
- "Attaching to Ubuntu Advantage. ua attach SomeToken",
- ) == caplog.record_tuples[-1]
+ "Failure of type `system`: Cannot enable unknown service 'asdf'",
+ ) in caplog.record_tuples
+ assert 'Failure enabling "fips"' not in caplog.text
+
+ def test_ua_enable_unexpected_error_codes(self, m_subp):
+ def fake_subp(cmd, capture=None, **kwargs):
+ if cmd[:2] == ["ua", "enable"] and capture:
+ raise subp.ProcessExecutionError(exit_code=255)
+ return subp.SubpResult(json.dumps({"errors": []}), "")
+
+ m_subp.side_effect = fake_subp
- def test_configure_ua_config_error_invalid_url(self, m_subp, caplog):
- """Errors from ua config command are raised."""
- m_subp.side_effect = subp.ProcessExecutionError(
- 'Failure enabling "http_proxy"'
- )
with pytest.raises(
RuntimeError,
- match=re.escape(
- 'Failure enabling Ubuntu Advantage config(s): "http_proxy"'
- ),
+ match=re.escape("Error while enabling service(s): esm"),
):
- configure_ua(
- token="SomeToken", config={"http_proxy": "not-a-valid-url"}
- )
+ configure_ua(token="SomeToken", enable=["esm"])
- def test_configure_ua_config_error_non_string_values(self, m_subp):
- """ValueError raised for any values expected as string type."""
- cfg = {
- "global_apt_http_proxy": "noscheme",
- "http_proxy": ["no-proxy"],
- "https_proxy": 1,
- }
- match = re.escape(
- "Expected URL scheme http/https for"
- " ua:config:global_apt_http_proxy. Found: noscheme\n"
- "Expected a URL for ua:config:http_proxy. Found: ['no-proxy']\n"
- "Expected a URL for ua:config:https_proxy. Found: 1"
- )
- with pytest.raises(ValueError, match=match):
- supplemental_schema_validation(cfg)
- assert 0 == m_subp.call_count
+ def test_ua_enable_non_json_response(self, m_subp):
+ def fake_subp(cmd, capture=None, **kwargs):
+ if cmd[:2] == ["ua", "enable"] and capture:
+ return subp.SubpResult("I dream to be a Json", "")
+ return subp.SubpResult(json.dumps({"errors": []}), "")
+
+ m_subp.side_effect = fake_subp
+
+ with pytest.raises(
+ RuntimeError,
+ match=re.escape("UA response was not json: I dream to be a Json"),
+ ):
+ configure_ua(token="SomeToken", enable=["esm"])
class TestUbuntuAdvantageSchema:
@pytest.mark.parametrize(
- "config, error_msg",
+ "config, expectation",
[
- ({"ubuntu_advantage": {}}, "'token' is a required property"),
+ ({"ubuntu_advantage": {}}, does_not_raise()),
# Strict keys
- (
+ pytest.param(
{"ubuntu_advantage": {"token": "win", "invalidkey": ""}},
- re.escape(
- "ubuntu_advantage: Additional properties are not allowed"
- " ('invalidkey"
+ pytest.raises(
+ SchemaValidationError,
+ match=re.escape(
+ "ubuntu_advantage: Additional properties are not"
+ " allowed ('invalidkey"
+ ),
+ ),
+ id="additional_properties",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "features": {"disable_auto_attach": True}
+ }
+ },
+ does_not_raise(),
+ id="disable_auto_attach",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "features": {"disable_auto_attach": False},
+ "enable": ["fips"],
+ "enable_beta": ["realtime-kernel"],
+ "token": "<token>",
+ }
+ },
+ does_not_raise(),
+ id="pro_custom_services",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "enable": ["fips"],
+ "enable_beta": ["realtime-kernel"],
+ "token": "<token>",
+ }
+ },
+ does_not_raise(),
+ id="non_pro_beta_services",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "features": {"asdf": False},
+ "enable": ["fips"],
+ "enable_beta": ["realtime-kernel"],
+ "token": "<token>",
+ }
+ },
+ pytest.raises(
+ SchemaValidationError,
+ match=re.escape(
+ "ubuntu_advantage.features: Additional properties are"
+ " not allowed ('asdf'"
+ ),
+ ),
+ id="pro_additional_features",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "enable": ["fips"],
+ "token": "<token>",
+ "config": {
+ "http_proxy": "http://some-proxy:8088",
+ "https_proxy": "https://some-proxy:8088",
+ "global_apt_https_proxy": "https://some-global-apt-proxy:8088/", # noqa: E501
+ "global_apt_http_proxy": "http://some-global-apt-proxy:8088/", # noqa: E501
+ "ua_apt_http_proxy": "http://10.0.10.10:3128",
+ "ua_apt_https_proxy": "https://10.0.10.10:3128",
+ },
+ }
+ },
+ does_not_raise(),
+ id="ua_config_valid_set",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "enable": ["fips"],
+ "token": "<token>",
+ "config": {
+ "http_proxy": None,
+ "https_proxy": None,
+ "global_apt_https_proxy": None,
+ "global_apt_http_proxy": None,
+ "ua_apt_http_proxy": None,
+ "ua_apt_https_proxy": None,
+ },
+ }
+ },
+ does_not_raise(),
+ id="ua_config_valid_unset",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "enable": ["fips"],
+ "token": "<token>",
+ "config": ["http_proxy=http://some-proxy:8088"],
+ }
+ },
+ pytest.raises(
+ SchemaValidationError,
+ match=re.escape(
+ "errors: ubuntu_advantage.config:"
+ " ['http_proxy=http://some-proxy:8088']"
+ ),
+ ),
+ id="ua_config_invalid_type",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "enable": ["fips"],
+ "token": "<token>",
+ "config": {
+ "http_proxy": 8888,
+ "https_proxy": ["http://some-proxy:8088"],
+ },
+ }
+ },
+ pytest.raises(
+ SchemaValidationError,
+ match=re.escape(
+ "errors: ubuntu_advantage.config.http_proxy: 8888"
+ " is not of type 'string', 'null',"
+ " ubuntu_advantage.config.https_proxy:"
+ " ['http://some-proxy:8088']"
+ ),
),
+ id="ua_config_invalid_type",
+ ),
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "enable": ["fips"],
+ "token": "<token>",
+ "config": {
+ "http_proxy": "http://some-proxy:8088",
+ "hola": "adios",
+ },
+ }
+ },
+ does_not_raise(),
+ id="ua_config_unknown_props_allowed",
),
],
)
@skipUnlessJsonSchema()
- def test_schema_validation(self, config, error_msg):
- if error_msg is None:
+ def test_schema_validation(self, config, expectation, caplog):
+ with expectation:
validate_cloudconfig_schema(config, get_schema(), strict=True)
+
+ @pytest.mark.parametrize(
+ "ua_section, expectation, log_msgs",
+ [
+ ({}, does_not_raise(), None),
+ ({"features": {}}, does_not_raise(), None),
+ (
+ {"features": {"disable_auto_attach": True}},
+ does_not_raise(),
+ None,
+ ),
+ (
+ {"features": {"disable_auto_attach": False}},
+ does_not_raise(),
+ None,
+ ),
+ (
+ {"features": [0, 1]},
+ pytest.raises(
+ RuntimeError,
+ match=(
+ "'ubuntu_advantage.features' should be a dict,"
+ " not a list"
+ ),
+ ),
+ ["'ubuntu_advantage.features' should be a dict, not a list\n"],
+ ),
+ (
+ {"features": {"disable_auto_attach": [0, 1]}},
+ pytest.raises(
+ RuntimeError,
+ match=(
+ "'ubuntu_advantage.features.disable_auto_attach'"
+ " should be a bool, not a list"
+ ),
+ ),
+ [
+ "'ubuntu_advantage.features.disable_auto_attach' should be"
+ " a bool, not a list\n"
+ ],
+ ),
+ ],
+ )
+ def test_validate_schema_features(
+ self, ua_section, expectation, log_msgs, caplog
+ ):
+ with expectation:
+ validate_schema_features(ua_section)
+ if log_msgs is not None:
+ for log_msg in log_msgs:
+ assert log_msg in caplog.text
else:
- with pytest.raises(SchemaValidationError, match=error_msg):
- validate_cloudconfig_schema(config, get_schema(), strict=True)
+ assert not caplog.text
class TestHandle:
@@ -270,6 +663,7 @@ class TestHandle:
"cloud",
"log_record_tuples",
"maybe_install_call_args_list",
+ "set_ua_config_call_args_list",
"configure_ua_call_args_list",
],
[
@@ -287,6 +681,7 @@ class TestHandle:
],
[],
[],
+ [],
id="no_config",
),
# If ubuntu_advantage is provided, try installing ua-tools package.
@@ -295,16 +690,33 @@ class TestHandle:
cloud,
[],
[mock.call(cloud)],
+ [mock.call(None)],
None,
id="tries_to_install_ubuntu_advantage_tools",
),
+ # If ubuntu_advantage config provided, configure it.
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "token": "valid",
+ "config": {"http_proxy": "http://proxy.org"},
+ }
+ },
+ cloud,
+ [],
+ None,
+ [mock.call({"http_proxy": "http://proxy.org"})],
+ None,
+ id="set_ua_config",
+ ),
# All ubuntu_advantage config keys are passed to configure_ua.
pytest.param(
{"ubuntu_advantage": {"token": "token", "enable": ["esm"]}},
cloud,
[],
[mock.call(cloud)],
- [mock.call(token="token", enable=["esm"], config=None)],
+ [mock.call(None)],
+ [mock.call(token="token", enable=["esm"])],
id="passes_credentials_and_services_to_configure_ua",
),
# Warning when ubuntu-advantage key is present with new config
@@ -321,9 +733,33 @@ class TestHandle:
)
],
None,
- [mock.call(token="token", enable=["esm"], config=None)],
+ [mock.call(None)],
+ [mock.call(token="token", enable=["esm"])],
id="warns_on_deprecated_ubuntu_advantage_key_w_config",
),
+ # Warning with beta services during attach
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "token": "token",
+ "enable": ["esm"],
+ "enable_beta": ["realtime-kernel"],
+ }
+ },
+ None,
+ [
+ (
+ MPATH,
+ logging.DEBUG,
+ "Ignoring `ubuntu-advantage.enable_beta` services in"
+ " UA attach: realtime-kernel",
+ )
+ ],
+ None,
+ [mock.call(None)],
+ [mock.call(token="token", enable=["esm"])],
+ id="warns_on_enable_beta_in_attach",
+ ),
# ubuntu_advantage should be preferred over ubuntu-advantage
pytest.param(
{
@@ -341,24 +777,33 @@ class TestHandle:
)
],
None,
- [mock.call(token="token", enable=["esm"], config=None)],
+ [mock.call(None)],
+ [mock.call(token="token", enable=["esm"])],
id="prefers_new_style_config",
),
],
)
+ @mock.patch(f"{MPATH}._should_auto_attach", return_value=False)
+ @mock.patch(f"{MPATH}._auto_attach")
@mock.patch(f"{MPATH}.configure_ua")
+ @mock.patch(f"{MPATH}.set_ua_config")
@mock.patch(f"{MPATH}.maybe_install_ua_tools")
- def test_handle(
+ def test_handle_attach(
self,
m_maybe_install_ua_tools,
+ m_set_ua_config,
m_configure_ua,
+ m_auto_attach,
+ m_should_auto_attach,
cfg,
cloud,
log_record_tuples,
maybe_install_call_args_list,
+ set_ua_config_call_args_list,
configure_ua_call_args_list,
caplog,
):
+ """Non-Pro schemas and instance."""
handle("nomatter", cfg=cfg, cloud=cloud, log=None, args=None)
for record_tuple in log_record_tuples:
assert record_tuple in caplog.record_tuples
@@ -367,8 +812,177 @@ class TestHandle:
maybe_install_call_args_list
== m_maybe_install_ua_tools.call_args_list
)
+ if set_ua_config_call_args_list is not None:
+ assert (
+ set_ua_config_call_args_list == m_set_ua_config.call_args_list
+ )
if configure_ua_call_args_list is not None:
assert configure_ua_call_args_list == m_configure_ua.call_args_list
+ assert [] == m_auto_attach.call_args_list
+
+ @pytest.mark.parametrize(
+ [
+ "cfg",
+ "cloud",
+ "log_record_tuples",
+ "auto_attach_side_effect",
+ "should_auto_attach",
+ "auto_attach_call_args_list",
+ "attach_call_args_list",
+ "expectation",
+ ],
+ [
+ # When auto_attach succeeds, there is no call to configure_ua.
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "features": {"disable_auto_attach": False}
+ }
+ },
+ cloud,
+ [],
+ None, # auto_attach succeeds
+ True, # Pro instance
+ [
+ mock.call({"features": {"disable_auto_attach": False}})
+ ], # auto_attach_call_args_list
+ [], # attach_call_args_list
+ does_not_raise(),
+ id="auto_attach_success",
+ ),
+ # When auto_attach fails in a Pro instance, no call to
+ # configure_ua.
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "features": {"disable_auto_attach": False}
+ }
+ },
+ cloud,
+ [],
+ RuntimeError("Auto attach error"),
+ True, # Pro instance
+ [
+ mock.call({"features": {"disable_auto_attach": False}})
+ ], # auto_attach_call_args_list
+ [], # attach_call_args_list
+ pytest.raises(RuntimeError, match="Auto attach error"),
+ id="auto_attach_error",
+ ),
+ # In a non-Pro instance with a token, fall back to normal attach.
+ pytest.param(
+ {
+ "ubuntu_advantage": {
+ "features": {"disable_auto_attach": False},
+ "token": "token",
+ }
+ },
+ cloud,
+ [],
+ None,
+ False, # non-Pro instance
+ [], # auto_attach_call_args_list
+ [
+ mock.call(
+ {
+ "features": {"disable_auto_attach": False},
+ "token": "token",
+ },
+ )
+ ], # attach_call_args_list
+ does_not_raise(),
+ id="not_pro_with_token",
+ ),
+ # In a non-Pro instance with enable, fall back to normal attach.
+ pytest.param(
+ {"ubuntu_advantage": {"enable": ["esm"]}},
+ cloud,
+ [],
+ None,
+ False, # non-Pro instance
+ [], # auto_attach_call_args_list
+ [
+ mock.call(
+ {
+ "enable": ["esm"],
+ },
+ )
+ ], # attach_call_args_list
+ does_not_raise(),
+ id="not_pro_with_enable",
+ ),
+ ],
+ )
+ @mock.patch(f"{MPATH}._should_auto_attach")
+ @mock.patch(f"{MPATH}._auto_attach")
+ @mock.patch(f"{MPATH}._attach")
+ def test_handle_auto_attach_vs_attach(
+ self,
+ m_attach,
+ m_auto_attach,
+ m_should_auto_attach,
+ cfg,
+ cloud,
+ log_record_tuples,
+ auto_attach_side_effect,
+ should_auto_attach,
+ auto_attach_call_args_list,
+ attach_call_args_list,
+ expectation,
+ caplog,
+ ):
+ m_should_auto_attach.return_value = should_auto_attach
+ if auto_attach_side_effect is not None:
+ m_auto_attach.side_effect = auto_attach_side_effect
+
+ with expectation:
+ handle("nomatter", cfg=cfg, cloud=cloud, log=None, args=None)
+
+ for record_tuple in log_record_tuples:
+ assert record_tuple in caplog.record_tuples
+ if attach_call_args_list is not None:
+ assert attach_call_args_list == m_attach.call_args_list
+ else:
+ assert [] == m_attach.call_args_list
+ assert auto_attach_call_args_list == m_auto_attach.call_args_list
+
+ @pytest.mark.parametrize("is_pro", [False, True])
+ @pytest.mark.parametrize(
+ "cfg",
+ [
+ (
+ {
+ "ubuntu_advantage": {
+ "features": {"disable_auto_attach": False},
+ }
+ }
+ ),
+ (
+ {
+ "ubuntu_advantage": {
+ "features": {"disable_auto_attach": True},
+ }
+ }
+ ),
+ ],
+ )
+ @mock.patch(f"{MPATH}._should_auto_attach")
+ @mock.patch(f"{MPATH}._auto_attach")
+ @mock.patch(f"{MPATH}._attach")
+ def test_no_fallback_attach(
+ self,
+ m_attach,
+ m_auto_attach,
+ m_should_auto_attach,
+ cfg,
+ is_pro,
+ ):
+ """Checks that attach is not called in the case where we want only to
+ enable or disable ua auto-attach.
+ """
+ m_should_auto_attach.return_value = is_pro
+ handle("nomatter", cfg=cfg, cloud=self.cloud, log=None, args=None)
+ assert not m_attach.call_args_list
@pytest.mark.parametrize(
"cfg, handle_kwargs, match",
@@ -401,6 +1015,172 @@ class TestHandle:
handle("nomatter", cfg=cfg, log=mock.Mock(), **handle_kwargs)
assert 0 == m_configure_ua.call_count
+ @pytest.mark.parametrize(
+ "cfg, match",
+ [
+ pytest.param(
+ {"ubuntu_advantage": [0, 1]},
+ "'ubuntu_advantage' should be a dict, not a list",
+ id="on_non_dict_config",
+ ),
+ pytest.param(
+ {"ubuntu_advantage": {"features": [0, 1]}},
+ "'ubuntu_advantage.features' should be a dict, not a list",
+ id="on_non_dict_ua_section",
+ ),
+ ],
+ )
+ def test_handle_errors(self, cfg, match):
+ with pytest.raises(RuntimeError, match=match):
+ handle(
+ "nomatter",
+ cfg=cfg,
+ log=mock.Mock(),
+ cloud=self.cloud,
+ args=None,
+ )
+
+ @mock.patch(f"{MPATH}.subp.subp")
+ def test_ua_config_error_invalid_url(self, m_subp, caplog):
+ """Errors from ua config command are raised."""
+ cfg = {
+ "ubuntu_advantage": {
+ "token": "SomeToken",
+ "config": {"http_proxy": "not-a-valid-url"},
+ }
+ }
+ m_subp.side_effect = subp.ProcessExecutionError(
+ 'Failure enabling "http_proxy"'
+ )
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Invalid ubuntu_advantage configuration:\nExpected URL scheme"
+ " http/https for ua:config:http_proxy"
+ ),
+ ):
+ handle(
+ "nomatter",
+ cfg=cfg,
+ log=mock.Mock(),
+ cloud=self.cloud,
+ args=None,
+ )
+ assert not caplog.text
+
+ @mock.patch(f"{MPATH}._should_auto_attach", return_value=False)
+ @mock.patch(f"{MPATH}.subp.subp")
+ def test_fallback_to_attach_no_token(
+ self, m_subp, m_should_auto_attach, caplog
+ ):
+ cfg = {"ubuntu_advantage": {"enable": ["esm"]}}
+ with pytest.raises(
+ RuntimeError,
+ match=re.escape(
+ "`ubuntu-advantage.token` required in non-Pro Ubuntu"
+ " instances."
+ ),
+ ):
+ handle(
+ "nomatter",
+ cfg=cfg,
+ log=mock.Mock(),
+ cloud=self.cloud,
+ args=None,
+ )
+ assert [] == m_subp.call_args_list
+ assert (
+ "`ubuntu-advantage.token` required in non-Pro Ubuntu"
+ " instances.\n"
+ ) in caplog.text
+
+
+class TestShouldAutoAttach:
+ def test_should_auto_attach_error(self, caplog, fake_uaclient):
+ m_should_auto_attach = mock.Mock()
+ m_should_auto_attach.should_auto_attach.side_effect = (
+ FakeUserFacingError("Some error") # noqa: E501
+ )
+ sys.modules[
+ "uaclient.api.u.pro.attach.auto.should_auto_attach.v1"
+ ] = m_should_auto_attach
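+ # Inject the fake module so _should_auto_attach's import of the UA
+ # API resolves to this mock.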
+ assert not _should_auto_attach({})
+ assert "Error during `should_auto_attach`: Some error" in caplog.text
+ assert (
+ "Unable to determine if this is an Ubuntu Pro instance."
+ " Fallback to normal UA attach." in caplog.text
+ )
+
+ @pytest.mark.parametrize(
+ "ua_section, expected_result",
+ [
+ ({}, None),
+ ({"features": {"disable_auto_attach": False}}, None),
+ # The user explicitly disabled auto-attach, so cloud-init must not
+ # attempt it:
+ ({"features": {"disable_auto_attach": True}}, False),
+ ],
+ )
+ def test_happy_path(
+ self, ua_section, expected_result, caplog, fake_uaclient
+ ):
+ m_should_auto_attach = mock.Mock()
+ sys.modules[
+ "uaclient.api.u.pro.attach.auto.should_auto_attach.v1"
+ ] = m_should_auto_attach
+ should_auto_attach_value = object()
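+ # A unique sentinel proves the UA API's value is returned unchanged
+ # whenever cloud-init defers to the API.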
+ m_should_auto_attach.should_auto_attach.return_value.should_auto_attach = ( # noqa: E501
+ should_auto_attach_value
+ )
+ if expected_result is None: # defer to the UA API's answer
+ assert should_auto_attach_value == _should_auto_attach(ua_section)
+ else: # cloud-init answers without the API
+ assert expected_result == _should_auto_attach(ua_section)
+ assert not caplog.text
+
+
+class TestAutoAttach:
+
+ ua_section: dict = {}
+
+ def test_full_auto_attach_error(self, caplog, mocker, fake_uaclient):
+ mocker.patch.dict("sys.modules")
+ sys.modules["uaclient.config"] = mock.Mock()
+ m_full_auto_attach = mock.Mock()
+ m_full_auto_attach.full_auto_attach.side_effect = FakeUserFacingError(
+ "Some error"
+ )
+ sys.modules[
+ "uaclient.api.u.pro.attach.auto.full_auto_attach.v1"
+ ] = m_full_auto_attach
+ expected_msg = "Error during `full_auto_attach`: Some error"
+ with pytest.raises(RuntimeError, match=re.escape(expected_msg)):
+ _auto_attach(self.ua_section)
+ assert expected_msg in caplog.text
+
+ def test_happy_path(self, caplog, mocker, fake_uaclient):
+ mocker.patch.dict("sys.modules")
+ sys.modules["uaclient.config"] = mock.Mock()
+ sys.modules[
+ "uaclient.api.u.pro.attach.auto.full_auto_attach.v1"
+ ] = mock.Mock()
+ _auto_attach(self.ua_section)
+ assert not caplog.text
+
+
+class TestAttach:
+ @mock.patch(f"{MPATH}.configure_ua")
+ def test_attach_without_token_raises_error(self, m_configure_ua):
+ with pytest.raises(
+ RuntimeError,
+ match=(
+ "`ubuntu-advantage.token` required in non-Pro Ubuntu"
+ " instances."
+ ),
+ ):
+ _attach({"enable": ["esm"]})
+ assert [] == m_configure_ua.call_args_list
+
@mock.patch(f"{MPATH}.subp.which")
class TestMaybeInstallUATools:
@@ -479,4 +1259,167 @@ class TestMaybeInstallUATools:
] == cloud.distro.install_packages.call_args_list
+@mock.patch(f"{MPATH}.subp.subp")
+class TestSetUAConfig:
+ def test_valid_config(self, m_subp, caplog):
+ ua_config = {
+ "http_proxy": "http://some-proxy:8088",
+ "https_proxy": "https://user:pass@some-proxy:8088",
+ "global_apt_https_proxy": "https://some-global-apt-proxy:8088/",
+ "global_apt_http_proxy": "http://some-global-apt-proxy:8088/",
+ "ua_apt_http_proxy": "http://10.0.10.10:3128",
+ "ua_apt_https_proxy": "https://10.0.10.10:3128",
+ }
+ set_ua_config(ua_config)
+ for ua_arg, redacted_arg in [
+ (
+ "http_proxy=http://some-proxy:8088",
+ "http_proxy=REDACTED",
+ ),
+ (
+ "https_proxy=https://user:pass@some-proxy:8088",
+ "https_proxy=REDACTED",
+ ),
+ (
+ "global_apt_https_proxy=https://some-global-apt-proxy:8088/",
+ "global_apt_https_proxy=REDACTED",
+ ),
+ (
+ "global_apt_http_proxy=http://some-global-apt-proxy:8088/",
+ "global_apt_http_proxy=REDACTED",
+ ),
+ (
+ "ua_apt_http_proxy=http://10.0.10.10:3128",
+ "ua_apt_http_proxy=REDACTED",
+ ),
+ (
+ "ua_apt_https_proxy=https://10.0.10.10:3128",
+ "ua_apt_https_proxy=REDACTED",
+ ),
+ ]:
+ assert (
+ mock.call(
+ ["ua", "config", "set", ua_arg],
+ logstring=["ua", "config", "set", redacted_arg],
+ )
+ in m_subp.call_args_list
+ )
+ assert f"Enabling UA config {redacted_arg}\n" in caplog.text
+ assert ua_arg not in caplog.text
+
+ assert 6 == m_subp.call_count
+
+ def test_ua_config_unset(self, m_subp, caplog):
+ ua_config = {
+ "https_proxy": "https://user:pass@some-proxy:8088",
+ "http_proxy": None,
+ }
+ set_ua_config(ua_config)
+ for call in [
+ mock.call(["ua", "config", "unset", "http_proxy"]),
+ mock.call(
+ [
+ "ua",
+ "config",
+ "set",
+ "https_proxy=https://user:pass@some-proxy:8088",
+ ],
+ logstring=["ua", "config", "set", "https_proxy=REDACTED"],
+ ),
+ ]:
+ assert call in m_subp.call_args_list
+ assert 2 == m_subp.call_count
+ assert "Enabling UA config https_proxy=REDACTED\n" in caplog.text
+ assert "https://user:pass@some-proxy:8088" not in caplog.text
+ assert "Disabling UA config for http_proxy\n" in caplog.text
+
+ def test_ua_config_error_non_string_values(self, m_subp, caplog):
+ """ValueError raised for any values expected as string type."""
+ ua_config = {
+ "global_apt_http_proxy": "noscheme",
+ "http_proxy": ["no-proxy"],
+ "https_proxy": 3.14,
+ }
+ match = re.escape(
+ "Invalid ubuntu_advantage configuration:\n"
+ "Expected URL scheme http/https for"
+ " ua:config:global_apt_http_proxy\n"
+ "Expected a URL for ua:config:http_proxy\n"
+ "Expected a URL for ua:config:https_proxy"
+ )
+ with pytest.raises(ValueError, match=match):
+ set_ua_config(ua_config)
+ assert 0 == m_subp.call_count
+ assert not caplog.text
+
+ def test_ua_config_unknown_prop(self, m_subp, caplog):
+ """On unknown config props, a log is issued and the prop is set."""
+ ua_config = {"asdf": "qwer"}
+ set_ua_config(ua_config)
+ assert [
+ mock.call(
+ ["ua", "config", "set", "asdf=qwer"],
+ logstring=["ua", "config", "set", "asdf=REDACTED"],
+ )
+ ] == m_subp.call_args_list
+ assert "qwer" not in caplog.text
+ assert (
+ "Not validating unknown ubuntu_advantage.config.asdf property\n"
+ in caplog.text
+ )
+
+ def test_ua_config_wrong_type(self, m_subp, caplog):
+ ua_config = ["asdf", "qwer"]
+ with pytest.raises(
+ RuntimeError,
+ match=(
+ "ubuntu_advantage: config should be a dict, not"
+ " a list; skipping enabling config parameters"
+ ),
+ ):
+ set_ua_config(ua_config)
+ assert 0 == m_subp.call_count
+ assert not caplog.text
+
+ def test_set_ua_config_error(self, m_subp, caplog):
+ ua_config = {
+ "https_proxy": "https://user:pass@some-proxy:8088",
+ }
+ # Simulate UA error
+ m_subp.side_effect = subp.ProcessExecutionError(
+ "Invalid proxy: https://user:pass@some-proxy:8088"
+ )
+ with pytest.raises(
+ RuntimeError,
+ match=re.escape(
+ "Failure enabling/disabling Ubuntu Advantage config(s):"
+ ' "https_proxy"'
+ ),
+ ):
+ set_ua_config(ua_config)
+ assert 1 == m_subp.call_count
+ assert "https://user:pass@some-proxy:8088" not in caplog.text
+ assert "Enabling UA config https_proxy=REDACTED\n" in caplog.text
+ assert 'Failure enabling/disabling "https_proxy":\n' in caplog.text
+
+ def test_unset_ua_config_error(self, m_subp, caplog):
+ ua_config = {"https_proxy": None}
+ # Simulate UA error
+ m_subp.side_effect = subp.ProcessExecutionError(
+ "Error unsetting https_proxy"
+ )
+ with pytest.raises(
+ RuntimeError,
+ match=re.escape(
+ "Failure enabling/disabling Ubuntu Advantage config(s): "
+ '"https_proxy"'
+ ),
+ ):
+ set_ua_config(ua_config)
+ assert 1 == m_subp.call_count
+ assert "https://user:pass@some-proxy:8088" not in caplog.text
+ assert "Disabling UA config for https_proxy\n" in caplog.text
+ assert 'Failure enabling/disabling "https_proxy":\n' in caplog.text
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py
index 9d54467e..6fbc47bd 100644
--- a/tests/unittests/config/test_cc_ubuntu_drivers.py
+++ b/tests/unittests/config/test_cc_ubuntu_drivers.py
@@ -218,8 +218,11 @@ class TestUbuntuDrivers:
debconf_file = tmpdir.join("nvidia.template")
m_tmp.return_value = tdir
pkg_install = mock.MagicMock()
+ distro = mock.Mock()
drivers.install_drivers(
- cfg_accepted["drivers"], pkg_install_func=pkg_install
+ cfg_accepted["drivers"],
+ pkg_install_func=pkg_install,
+ distro=distro,
)
assert 0 == pkg_install.call_count
assert [mock.call("ubuntu-drivers")] == m_which.call_args_list
@@ -233,8 +236,11 @@ class TestUbuntuDrivers:
):
"""install_drivers should raise TypeError if not given a config dict"""
pkg_install = mock.MagicMock()
+ distro = mock.Mock()
with pytest.raises(TypeError, match=".*expected dict.*"):
- drivers.install_drivers("mystring", pkg_install_func=pkg_install)
+ drivers.install_drivers(
+ "mystring", pkg_install_func=pkg_install, distro=distro
+ )
assert 0 == pkg_install.call_count
@mock.patch(M_TMP_PATH)
diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py
index 12cdaa19..00eca93b 100644
--- a/tests/unittests/config/test_cc_users_groups.py
+++ b/tests/unittests/config/test_cc_users_groups.py
@@ -98,9 +98,13 @@ class TestHandleUsersGroups(CiTestCase):
}
}
metadata = {}
- cloud = self.tmp_cloud(
- distro="freebsd", sys_cfg=sys_cfg, metadata=metadata
- )
+ # patch ifconfig -a
+ with mock.patch(
+ "cloudinit.distros.networking.subp.subp", return_value=("", None)
+ ):
+ cloud = self.tmp_cloud(
+ distro="freebsd", sys_cfg=sys_cfg, metadata=metadata
+ )
cc_users_groups.handle("modulename", cfg, cloud, None, None)
self.assertCountEqual(
m_fbsd_user.call_args_list,
diff --git a/tests/unittests/config/test_cc_wireguard.py b/tests/unittests/config/test_cc_wireguard.py
index 59a5223b..6c91625b 100644
--- a/tests/unittests/config/test_cc_wireguard.py
+++ b/tests/unittests/config/test_cc_wireguard.py
@@ -16,7 +16,7 @@ MPATH = "cloudinit.config.cc_wireguard"
MIN_KERNEL_VERSION = (5, 6)
-class FakeCloud(object):
+class FakeCloud:
def __init__(self, distro):
self.distro = distro
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
index a401ffd4..50128f2c 100644
--- a/tests/unittests/config/test_schema.py
+++ b/tests/unittests/config/test_schema.py
@@ -165,6 +165,7 @@ class TestGetSchema:
assert ["$defs", "$schema", "allOf"] == sorted(list(schema.keys()))
# New style schema should be defined in static schema file in $defs
expected_subschema_defs = [
+ {"$ref": "#/$defs/base_config"},
{"$ref": "#/$defs/cc_ansible"},
{"$ref": "#/$defs/cc_apk_configure"},
{"$ref": "#/$defs/cc_apt_configure"},
diff --git a/tests/unittests/distros/test_generic.py b/tests/unittests/distros/test__init__.py
index fedc7300..7c5187fd 100644
--- a/tests/unittests/distros/test_generic.py
+++ b/tests/unittests/distros/test__init__.py
@@ -10,6 +10,8 @@ import pytest
from cloudinit import distros, util
from tests.unittests import helpers
+M_PATH = "cloudinit.distros."
+
unknown_arch_info = {
"arches": ["default"],
"failsafe": {
@@ -208,7 +210,11 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
def test_expire_passwd_freebsd_uses_pw_command(self):
"""Test FreeBSD.expire_passwd uses the pw command."""
cls = distros.fetch("freebsd")
- d = cls("freebsd", {}, None)
+ # patch ifconfig -a
+ with mock.patch(
+ "cloudinit.distros.networking.subp.subp", return_value=("", None)
+ ):
+ d = cls("freebsd", {}, None)
with mock.patch("cloudinit.subp.subp") as m_subp:
d.expire_passwd("myuser")
m_subp.assert_called_once_with(
@@ -382,4 +388,23 @@ class TestGetPackageMirrors:
}
+@pytest.mark.usefixtures("fake_filesystem")
+class TestDistro:
+ @pytest.mark.parametrize("is_noexec", [False, True])
+ @mock.patch(M_PATH + "util.has_mount_opt")
+ @mock.patch(M_PATH + "temp_utils.get_tmp_ancestor", return_value="/tmp")
+ def test_get_tmp_exec_path(
+ self, m_get_tmp_ancestor, m_has_mount_opt, tmp_is_exec, mocker
+ ):
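+ # has_mount_opt("noexec") returning False means the tmp dir is
+ # executable and may be used directly.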
+ m_has_mount_opt.return_value = not tmp_is_exec
+ cls = distros.fetch("ubuntu")
+ distro = cls("ubuntu", {}, None)
+ mocker.patch.object(distro, "usr_lib_exec", "/usr_lib_exec")
+ tmp_path = distro.get_tmp_exec_path()
+ if tmp_is_exec:
+ assert "/tmp" == tmp_path
+ else:
+ assert "/usr_lib_exec/cloud-init/clouddir" == tmp_path
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/distros/test_arch.py b/tests/unittests/distros/test_arch.py
index 5446295e..fc54ceee 100644
--- a/tests/unittests/distros/test_arch.py
+++ b/tests/unittests/distros/test_arch.py
@@ -2,10 +2,9 @@
from cloudinit import util
from cloudinit.distros.arch import _render_network
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import CiTestCase, dir2dict
-from . import _get_distro
-
class TestArch(CiTestCase):
def test_get_distro(self):
diff --git a/tests/unittests/distros/test_gentoo.py b/tests/unittests/distros/test_gentoo.py
index dadf5df5..34be3c56 100644
--- a/tests/unittests/distros/test_gentoo.py
+++ b/tests/unittests/distros/test_gentoo.py
@@ -1,10 +1,9 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import atomic_helper, util
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import CiTestCase
-from . import _get_distro
-
class TestGentoo(CiTestCase):
def test_write_hostname(self):
diff --git a/tests/unittests/distros/test_ifconfig.py b/tests/unittests/distros/test_ifconfig.py
new file mode 100644
index 00000000..ce595746
--- /dev/null
+++ b/tests/unittests/distros/test_ifconfig.py
@@ -0,0 +1,72 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros.parsers.ifconfig import Ifconfig
+from tests.unittests.helpers import TestCase, readResource
+
+
+class TestIfconfigParserFreeBSD(TestCase):
+ def setUp(self):
+ super(TestIfconfigParserFreeBSD, self).setUp()
+ self.ifs_txt = readResource("netinfo/freebsd-ifconfig-output")
+
+ def test_parse_freebsd(self):
+ """assert parsing works without any exceptions"""
+ Ifconfig().parse(self.ifs_txt)
+
+ def test_is_bridge(self):
+ """assert bridge0 is_bridge"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert ifs["bridge0"].is_bridge
+
+ def test_index(self):
+ """assert vtnet0 index is 1"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert ifs["vtnet0"].index == 1
+
+ def test_is_vlan(self):
+ """assert re0.33 is_vlan"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert ifs["re0.33"].is_vlan
+
+ def test_description(self):
+ """assert vnet0:11 is associated with jail: webirc"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert ifs["vnet0:11"].description == "'associated with jail: webirc'"
+
+ def test_vtnet_options(self):
+ """assert vtnet has TXCSUM"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert "txcsum" in ifs["vtnet0"].options
+
+
+class TestIfconfigParserOpenBSD(TestCase):
+ def setUp(self):
+ super(TestIfconfigParserOpenBSD, self).setUp()
+ self.ifs_txt = readResource("netinfo/openbsd-ifconfig-output")
+
+ def test_parse_openbsd(self):
+ """assert parsing works without any exceptions"""
+ Ifconfig().parse(self.ifs_txt)
+
+ def test_is_not_physical(self):
+ """assert enc0 is not physical"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert not ifs["enc0"].is_physical
+
+ def test_is_physical(self):
+ """assert enc0 is not physical"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert ifs["vio0"].is_physical
+
+ def test_index(self):
+ """assert vio0 index is 1"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert ifs["vio0"].index == 1
+
+ def test_gif_ipv6(self):
+ """assert that we can parse a gif inet6 address, despite the -->"""
+ ifs = Ifconfig().parse(self.ifs_txt)
+ assert ifs["gif0"].inet6["fe80::be30:5bff:fed0:471"] == {
+ "prefixlen": "64",
+ "scope": "link-local",
+ }
diff --git a/tests/unittests/distros/test_manage_service.py b/tests/unittests/distros/test_manage_service.py
index 9e64b35c..98823770 100644
--- a/tests/unittests/distros/test_manage_service.py
+++ b/tests/unittests/distros/test_manage_service.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import CiTestCase, mock
from tests.unittests.util import MockDistro
@@ -28,6 +29,39 @@ class TestManageService(CiTestCase):
self.dist.manage_service("start", "myssh")
m_subp.assert_called_with(["service", "myssh", "start"], capture=True)
+ @mock.patch.object(MockDistro, "uses_systemd", return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_rcservice_initcmd(self, m_subp, m_sysd):
+ dist = _get_distro("alpine")
+ dist.init_cmd = ["rc-service", "--nocolor"]
+ dist.manage_service("start", "myssh")
+ m_subp.assert_called_with(
+ ["rc-service", "--nocolor", "myssh", "start"], capture=True
+ )
+
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_alpine_rcupdate_cmd(self, m_subp):
+ dist = _get_distro("alpine")
+ dist.update_cmd = ["rc-update", "--nocolor"]
+ dist.manage_service("enable", "myssh")
+ m_subp.assert_called_with(
+ ["rc-update", "--nocolor", "add", "myssh"], capture=True
+ )
+
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_rcctl_initcmd(self, m_subp):
+ dist = _get_distro("openbsd")
+ dist.init_cmd = ["rcctl"]
+ dist.manage_service("start", "myssh")
+ m_subp.assert_called_with(["rcctl", "start", "myssh"], capture=True)
+
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_fbsd_service_initcmd(self, m_subp):
+ dist = _get_distro("freebsd")
+ dist.init_cmd = ["service"]
+ dist.manage_service("enable", "myssh")
+ m_subp.assert_called_with(["service", "myssh", "enable"], capture=True)
+
@mock.patch.object(MockDistro, "uses_systemd", return_value=True)
@mock.patch("cloudinit.distros.subp.subp")
def test_manage_service_systemctl(self, m_subp, m_sysd):
@@ -37,5 +71,11 @@ class TestManageService(CiTestCase):
["systemctl", "start", "myssh"], capture=True
)
-
-# vi: ts=4 sw=4 expandtab
+ @mock.patch.object(MockDistro, "uses_systemd", return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_disable_systemctl(self, m_subp, m_sysd):
+ self.dist.init_cmd = ["ignore"]
+ self.dist.manage_service("disable", "myssh")
+ m_subp.assert_called_with(
+ ["systemctl", "disable", "myssh"], capture=True
+ )
diff --git a/tests/unittests/distros/test_mariner.py b/tests/unittests/distros/test_mariner.py
new file mode 100644
index 00000000..57f8d498
--- /dev/null
+++ b/tests/unittests/distros/test_mariner.py
@@ -0,0 +1,25 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from tests.unittests.helpers import CiTestCase
+
+from . import _get_distro
+
+SYSTEM_INFO = {
+ "paths": {
+ "cloud_dir": "/var/lib/cloud/",
+ "templates_dir": "/etc/cloud/templates/",
+ },
+ "network": {"renderers": "networkd"},
+}
+
+
+class TestMariner(CiTestCase):
+ with_logs = True
+ distro = _get_distro("mariner", SYSTEM_INFO)
+ expected_log_line = "Rely on MarinerOS default network config"
+
+ def test_network_renderer(self):
+ self.assertEqual(self.distro._cfg["network"]["renderers"], "networkd")
+
+ def test_get_distro(self):
+ self.assertEqual(self.distro.osfamily, "mariner")
diff --git a/tests/unittests/distros/test_netbsd.py b/tests/unittests/distros/test_netbsd.py
index 0bc6dfbd..4ff0da0b 100644
--- a/tests/unittests/distros/test_netbsd.py
+++ b/tests/unittests/distros/test_netbsd.py
@@ -12,7 +12,11 @@ def test_init(m_os, with_pkgin):
m_os.path.exists.return_value = with_pkgin
cfg = {}
- distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None)
+ # patch ifconfig -a
+ with mock.patch(
+ "cloudinit.distros.networking.subp.subp", return_value=("", None)
+ ):
+ distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None)
expectation = ["pkgin", "-y", "full-upgrade"] if with_pkgin else None
assert distro.pkg_cmd_upgrade_prefix == expectation
assert [mock.call("/usr/pkg/bin/pkgin")] == m_os.path.exists.call_args_list
diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py
index 6509f1de..f17a5d21 100644
--- a/tests/unittests/distros/test_netconfig.py
+++ b/tests/unittests/distros/test_netconfig.py
@@ -10,7 +10,11 @@ from unittest import mock
from cloudinit import distros, helpers, safeyaml, settings, subp, util
from cloudinit.distros.parsers.sys_conf import SysConf
from cloudinit.net.activators import IfUpDownActivator
-from tests.unittests.helpers import FilesystemMockingTestCase, dir2dict
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ dir2dict,
+ readResource,
+)
BASE_NET_CFG = """
auto lo
@@ -267,7 +271,7 @@ network:
"""
-class WriteBuffer(object):
+class WriteBuffer:
def __init__(self):
self.buffer = StringIO()
self.mode = None
@@ -314,7 +318,12 @@ class TestNetCfgDistroBase(FilesystemMockingTestCase):
class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase):
def setUp(self):
super(TestNetCfgDistroFreeBSD, self).setUp()
- self.distro = self._get_distro("freebsd", renderers=["freebsd"])
+ ifs_txt = readResource("netinfo/freebsd-ifconfig-output")
+ with mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=(ifs_txt, None),
+ ):
+ self.distro = self._get_distro("freebsd", renderers=["freebsd"])
def _apply_and_verify_freebsd(
self, apply_fn, config, expected_cfgs=None, bringup=False
@@ -1110,6 +1119,131 @@ class TestNetCfgDistroPhoton(TestNetCfgDistroBase):
)
+class TestNetCfgDistroMariner(TestNetCfgDistroBase):
+ def setUp(self):
+ super(TestNetCfgDistroMariner, self).setUp()
+ self.distro = self._get_distro("mariner", renderers=["networkd"])
+
+ def create_conf_dict(self, contents):
+        content_dict = {}
+        key = None
+        for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r"^\[(.+)\]$", line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ assert key
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _apply_and_verify(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
+ if not expected_cfgs:
+            raise ValueError("expected_cfgs must not be None")
+
+ tmpd = None
+ with mock.patch("cloudinit.net.networkd.available") as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ actual = self.create_conf_dict(results[cfgpath].splitlines())
+ self.compare_dicts(actual, expected)
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ def nwk_file_path(self, ifname):
+ return "/etc/systemd/network/10-cloud-init-%s.network" % ifname
+
+ def net_cfg_1(self, ifname):
+ ret = (
+ """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.1.5/24
+ [Route]
+ Gateway=192.168.1.254"""
+ % ifname
+ )
+ return ret
+
+ def net_cfg_2(self, ifname):
+ ret = (
+ """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=ipv4"""
+ % ifname
+ )
+ return ret
+
+ def test_mariner_network_config_v1(self):
+ tmp = self.net_cfg_1("eth0").splitlines()
+ expected_eth0 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2("eth1").splitlines()
+ expected_eth1 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path("eth0"): expected_eth0,
+ self.nwk_file_path("eth1"): expected_eth1,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, V1_NET_CFG, expected_cfgs.copy()
+ )
+
+ def test_mariner_network_config_v2(self):
+ tmp = self.net_cfg_1("eth7").splitlines()
+ expected_eth7 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2("eth9").splitlines()
+ expected_eth9 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path("eth7"): expected_eth7,
+ self.nwk_file_path("eth9"): expected_eth9,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, V2_NET_CFG, expected_cfgs.copy()
+ )
+
+ def test_mariner_network_config_v1_with_duplicates(self):
+ expected = """\
+ [Match]
+ Name=eth0
+ [Network]
+ DHCP=no
+ DNS=1.2.3.4
+ Domains=test.com
+ [Address]
+ Address=192.168.0.102/24"""
+
+ net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS)
+
+ expected = self.create_conf_dict(expected.splitlines())
+ expected_cfgs = {
+ self.nwk_file_path("eth0"): expected,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, net_cfg, expected_cfgs.copy()
+ )
+
+
def get_mode(path, target=None):
return os.stat(subp.target_path(target, path)).st_mode & 0o777
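
create_conf_dict above reduces a rendered .network file to a mapping of section header to key=value lines, which lets compare_dicts check contents while ignoring line order. A rough standalone equivalent, assuming the same INI-like input:

import re


def conf_to_dict(text):
    """Map '[Section]' headers to the key=value lines beneath them."""
    sections, key = {}, None
    for line in text.splitlines():
        line = line.strip()
        if not line:
            continue
        if re.fullmatch(r"\[.+\]", line):
            key = line
            sections[key] = []
        else:
            assert key is not None, "entry before any [Section] header"
            sections[key].append(line)
    return sections


cfg = "[Match]\nName=eth0\n[Network]\nDHCP=no\n"
assert conf_to_dict(cfg) == {"[Match]": ["Name=eth0"], "[Network]": ["DHCP=no"]}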
diff --git a/tests/unittests/distros/test_networking.py b/tests/unittests/distros/test_networking.py
index 6f7465c9..9a8cf507 100644
--- a/tests/unittests/distros/test_networking.py
+++ b/tests/unittests/distros/test_networking.py
@@ -13,7 +13,7 @@ from cloudinit.distros.networking import (
LinuxNetworking,
Networking,
)
-from tests.unittests.helpers import does_not_raise
+from tests.unittests.helpers import does_not_raise, readResource
@pytest.fixture
@@ -47,6 +47,17 @@ def generic_networking_cls():
@pytest.fixture
+def bsd_networking_cls(asset="netinfo/freebsd-ifconfig-output"):
+ """Returns a patched BSDNetworking class which already comes pre-loaded
+ with output for ``ifconfig -a``"""
+ ifs_txt = readResource(asset)
+ with mock.patch(
+ "cloudinit.distros.networking.subp.subp", return_value=(ifs_txt, None)
+ ):
+ yield BSDNetworking
+
+
+@pytest.fixture
def sys_class_net(tmpdir):
sys_class_net_path = tmpdir.join("sys/class/net")
sys_class_net_path.ensure_dir()
@@ -58,9 +69,33 @@ def sys_class_net(tmpdir):
class TestBSDNetworkingIsPhysical:
- def test_raises_notimplementederror(self):
- with pytest.raises(NotImplementedError):
- BSDNetworking().is_physical("eth0")
+ def test_is_physical(self, bsd_networking_cls):
+ networking = bsd_networking_cls()
+ assert networking.is_physical("vtnet0")
+
+ def test_is_not_physical(self, bsd_networking_cls):
+ networking = bsd_networking_cls()
+ assert not networking.is_physical("re0.33")
+
+
+class TestBSDNetworkingIsVLAN:
+ def test_is_vlan(self, bsd_networking_cls):
+ networking = bsd_networking_cls()
+ assert networking.is_vlan("re0.33")
+
+ def test_is_not_physical(self, bsd_networking_cls):
+ networking = bsd_networking_cls()
+ assert not networking.is_vlan("vtnet0")
+
+
+class TestBSDNetworkingIsBridge:
+ def test_is_vlan(self, bsd_networking_cls):
+ networking = bsd_networking_cls()
+ assert networking.is_bridge("bridge0")
+
+ def test_is_not_physical(self, bsd_networking_cls):
+ networking = bsd_networking_cls()
+ assert not networking.is_bridge("vtnet0")
class TestLinuxNetworkingIsPhysical:
@@ -83,10 +118,20 @@ class TestLinuxNetworkingIsPhysical:
assert LinuxNetworking().is_physical(devname)
+@mock.patch("cloudinit.distros.networking.BSDNetworking.is_up")
class TestBSDNetworkingTrySetLinkUp:
- def test_raises_notimplementederror(self):
- with pytest.raises(NotImplementedError):
- BSDNetworking().try_set_link_up("eth0")
+ def test_calls_subp_return_true(self, m_is_up, bsd_networking_cls):
+ devname = "vtnet0"
+ networking = bsd_networking_cls()
+ m_is_up.return_value = True
+
+ with mock.patch("cloudinit.subp.subp") as m_subp:
+ is_success = networking.try_set_link_up(devname)
+ assert (
+ mock.call(["ifconfig", devname, "up"])
+ == m_subp.call_args_list[-1]
+ )
+ assert is_success
@mock.patch("cloudinit.net.is_up")
@@ -116,9 +161,9 @@ class TestLinuxNetworkingTrySetLinkUp:
class TestBSDNetworkingSettle:
- def test_settle_doesnt_error(self):
- # This also implicitly tests that it doesn't use subp.subp
- BSDNetworking().settle()
+ def test_settle_doesnt_error(self, bsd_networking_cls):
+ networking = bsd_networking_cls()
+ networking.settle()
@pytest.mark.usefixtures("sys_class_net")
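
The bsd_networking_cls fixture works because the yield sits inside the with mock.patch(...) block: the patch stays active for the whole test body and is unwound at fixture teardown. A minimal sketch of that shape (the patched target and fake output here are invented):

from unittest import mock

import pytest

FAKE_IFCONFIG = "vtnet0: flags=8863<UP,BROADCAST> ..."


@pytest.fixture
def patched_check_output():
    # Code after `yield` runs at teardown, so the patch covers the
    # entire body of any test that requests this fixture.
    with mock.patch(
        "subprocess.check_output", return_value=FAKE_IFCONFIG
    ) as m:
        yield m


def test_uses_patch(patched_check_output):
    import subprocess

    assert subprocess.check_output(["ifconfig", "-a"]) == FAKE_IFCONFIG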
diff --git a/tests/unittests/distros/test_opensuse.py b/tests/unittests/distros/test_opensuse.py
index 4a4b266f..6b8eea65 100644
--- a/tests/unittests/distros/test_opensuse.py
+++ b/tests/unittests/distros/test_opensuse.py
@@ -1,9 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import CiTestCase
-from . import _get_distro
-
class TestopenSUSE(CiTestCase):
def test_get_distro(self):
diff --git a/tests/unittests/distros/test_photon.py b/tests/unittests/distros/test_photon.py
index fed30c2b..b192be7a 100644
--- a/tests/unittests/distros/test_photon.py
+++ b/tests/unittests/distros/test_photon.py
@@ -1,10 +1,9 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import util
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import CiTestCase, mock
-from . import _get_distro
-
SYSTEM_INFO = {
"paths": {
"cloud_dir": "/var/lib/cloud/",
diff --git a/tests/unittests/distros/test_sles.py b/tests/unittests/distros/test_sles.py
index 66b8b13d..7732c380 100644
--- a/tests/unittests/distros/test_sles.py
+++ b/tests/unittests/distros/test_sles.py
@@ -1,9 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import CiTestCase
-from . import _get_distro
-
class TestSLES(CiTestCase):
def test_get_distro(self):
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 31e0188c..fec63809 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -17,8 +17,7 @@ from typing import ClassVar, List, Union
from unittest import mock
from unittest.util import strclass
-import httpretty
-import pytest
+import responses
import cloudinit
from cloudinit import cloud, distros
@@ -374,30 +373,18 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
self.patched_funcs.close()
-class HttprettyTestCase(CiTestCase):
- # necessary as http_proxy gets in the way of httpretty
- # https://github.com/gabrielfalcao/HTTPretty/issues/122
- # Also make sure that allow_net_connect is set to False.
- # And make sure reset and enable/disable are done.
-
+class ResponsesTestCase(CiTestCase):
def setUp(self):
- self.restore_proxy = os.environ.get("http_proxy")
- if self.restore_proxy is not None:
- del os.environ["http_proxy"]
- super(HttprettyTestCase, self).setUp()
- httpretty.HTTPretty.allow_net_connect = False
- httpretty.reset()
- httpretty.enable()
- # Stop the logging from HttpPretty so our logs don't get mixed
- # up with its logs
- logging.getLogger("httpretty.core").setLevel(logging.CRITICAL)
+ super().setUp()
+ self.responses = responses.RequestsMock(
+ assert_all_requests_are_fired=False
+ )
+ self.responses.start()
def tearDown(self):
- httpretty.disable()
- httpretty.reset()
- if self.restore_proxy:
- os.environ["http_proxy"] = self.restore_proxy
- super(HttprettyTestCase, self).tearDown()
+ self.responses.stop()
+ self.responses.reset()
+ super().tearDown()
class SchemaTestCaseMixin(unittest.TestCase):
@@ -585,10 +572,7 @@ def does_not_raise():
>>> assert (0 / example_input) is not None
"""
- try:
- yield
- except Exception as ex:
- raise pytest.fail("DID RAISE {0}".format(ex))
+ yield
# vi: ts=4 expandtab
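
ResponsesTestCase swaps httpretty's global monkey-patching for an explicitly managed responses.RequestsMock. A bare-bones sketch of the same lifecycle, assuming the requests and responses packages are installed:

import unittest

import requests
import responses


class ResponsesExample(unittest.TestCase):
    def setUp(self):
        # assert_all_requests_are_fired=False: leftover registrations
        # are not an error, matching the helper class above.
        self.responses = responses.RequestsMock(
            assert_all_requests_are_fired=False
        )
        self.responses.start()

    def tearDown(self):
        self.responses.stop()
        self.responses.reset()

    def test_get(self):
        self.responses.add(responses.GET, "http://fake/", body="ok")
        self.assertEqual("ok", requests.get("http://fake/").text)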
diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py
index db9f0e97..e4f57b50 100644
--- a/tests/unittests/net/test_dhcp.py
+++ b/tests/unittests/net/test_dhcp.py
@@ -4,8 +4,8 @@ import os
import signal
from textwrap import dedent
-import httpretty
import pytest
+import responses
from cloudinit.net.dhcp import (
InvalidDHCPLeaseFileError,
@@ -22,12 +22,15 @@ from cloudinit.net.ephemeral import EphemeralDHCPv4
from cloudinit.util import ensure_file, write_file
from tests.unittests.helpers import (
CiTestCase,
- HttprettyTestCase,
+ ResponsesTestCase,
mock,
populate_dir,
- wrap_and_call,
)
+PID_F = "/run/dhclient.pid"
+LEASE_F = "/run/dhclient.lease"
+DHCLIENT = "/sbin/dhclient"
+
class TestParseDHCPLeasesFile(CiTestCase):
def test_parse_empty_lease_file_errors(self):
@@ -373,46 +376,17 @@ class TestDHCPDiscoveryClean(CiTestCase):
self.logs.getvalue(),
)
- @mock.patch("cloudinit.temp_utils.os.getuid")
- @mock.patch("cloudinit.net.dhcp.dhcp_discovery")
- @mock.patch("cloudinit.net.dhcp.subp.which")
- @mock.patch("cloudinit.net.dhcp.find_fallback_nic")
- def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
- """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
- m_uid.return_value = 0 # Fake root user for tmpdir
- m_fback.return_value = "eth9"
- m_which.return_value = "/sbin/dhclient"
- m_dhcp.return_value = {"address": "192.168.2.2"}
- retval = wrap_and_call(
- "cloudinit.temp_utils",
- {"_TMPDIR": {"new": None}, "os.getuid": 0},
- maybe_perform_dhcp_discovery,
- )
- self.assertEqual({"address": "192.168.2.2"}, retval)
- self.assertEqual(
- 1, m_dhcp.call_count, "dhcp_discovery not called once"
- )
- call = m_dhcp.call_args_list[0]
- self.assertEqual("/sbin/dhclient", call[0][0])
- self.assertEqual("eth9", call[0][1])
- self.assertIn("/var/tmp/cloud-init/cloud-init-dhcp-", call[0][2])
-
@mock.patch("time.sleep", mock.MagicMock())
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.subp.subp")
- def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(
- self, m_subp, m_kill
- ):
+ @mock.patch("cloudinit.net.dhcp.util.wait_for_files", return_value=False)
+ def test_dhcp_discovery_warns_invalid_pid(self, m_wait, m_subp, m_kill):
"""dhcp_discovery logs a warning when pidfile contains invalid content.
Lease processing still occurs and no proc kill is attempted.
"""
m_subp.return_value = ("", "")
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, "dhclient.orig")
- script_content = "#!/bin/bash\necho fake-dhclient"
- write_file(dhclient_script, script_content, mode=0o755)
- write_file(self.tmp_path("dhclient.pid", tmpdir), "") # Empty pid ''
+
lease_content = dedent(
"""
lease {
@@ -423,19 +397,24 @@ class TestDHCPDiscoveryClean(CiTestCase):
}
"""
)
- write_file(self.tmp_path("dhcp.leases", tmpdir), lease_content)
- self.assertCountEqual(
- [
- {
- "interface": "eth9",
- "fixed-address": "192.168.2.74",
- "subnet-mask": "255.255.255.0",
- "routers": "192.168.2.1",
- }
- ],
- dhcp_discovery(dhclient_script, "eth9", tmpdir),
- )
+ with mock.patch(
+ "cloudinit.util.load_file", return_value=lease_content
+ ):
+ self.assertCountEqual(
+ [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ }
+ ],
+ parse_dhcp_lease_file("lease"),
+ )
+ with self.assertRaises(InvalidDHCPLeaseFileError):
+ with mock.patch("cloudinit.util.load_file", return_value=""):
+ dhcp_discovery(DHCLIENT, "eth9")
self.assertIn(
"dhclient(pid=, parentpid=unknown) failed "
"to daemonize after 10.0 seconds",
@@ -447,23 +426,18 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.util.wait_for_files")
@mock.patch("cloudinit.net.dhcp.subp.subp")
- def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(
+ def test_dhcp_discovery_waits_on_lease_and_pid(
self, m_subp, m_wait, m_kill, m_getppid
):
"""dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
m_subp.return_value = ("", "")
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, "dhclient.orig")
- script_content = "#!/bin/bash\necho fake-dhclient"
- write_file(dhclient_script, script_content, mode=0o755)
+
# Don't create pid or leases file
- pidfile = self.tmp_path("dhclient.pid", tmpdir)
- leasefile = self.tmp_path("dhcp.leases", tmpdir)
- m_wait.return_value = [pidfile] # Return the missing pidfile wait for
+        m_wait.return_value = [PID_F]  # Report the pidfile as missing
m_getppid.return_value = 1 # Indicate that dhclient has daemonized
- self.assertEqual([], dhcp_discovery(dhclient_script, "eth9", tmpdir))
+        self.assertEqual([], dhcp_discovery(DHCLIENT, "eth9"))
self.assertEqual(
- mock.call([pidfile, leasefile], maxwait=5, naplen=0.01),
+ mock.call([PID_F, LEASE_F], maxwait=5, naplen=0.01),
m_wait.call_args_list[0],
)
self.assertIn(
@@ -475,85 +449,13 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.subp.subp")
- def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
- """dhcp_discovery brings up the interface and runs dhclient.
-
- It also returns the parsed dhcp.leases file generated in the sandbox.
- """
- m_subp.return_value = ("", "")
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, "dhclient.orig")
- script_content = "#!/bin/bash\necho fake-dhclient"
- write_file(dhclient_script, script_content, mode=0o755)
- lease_content = dedent(
- """
- lease {
- interface "eth9";
- fixed-address 192.168.2.74;
- option subnet-mask 255.255.255.0;
- option routers 192.168.2.1;
- }
- """
- )
- lease_file = os.path.join(tmpdir, "dhcp.leases")
- write_file(lease_file, lease_content)
- pid_file = os.path.join(tmpdir, "dhclient.pid")
- my_pid = 1
- write_file(pid_file, "%d\n" % my_pid)
- m_getppid.return_value = 1 # Indicate that dhclient has daemonized
-
- self.assertCountEqual(
- [
- {
- "interface": "eth9",
- "fixed-address": "192.168.2.74",
- "subnet-mask": "255.255.255.0",
- "routers": "192.168.2.1",
- }
- ],
- dhcp_discovery(dhclient_script, "eth9", tmpdir),
- )
- # dhclient script got copied
- with open(os.path.join(tmpdir, "dhclient")) as stream:
- self.assertEqual(script_content, stream.read())
- # Interface was brought up before dhclient called from sandbox
- m_subp.assert_has_calls(
- [
- mock.call(
- ["ip", "link", "set", "dev", "eth9", "up"], capture=True
- ),
- mock.call(
- [
- os.path.join(tmpdir, "dhclient"),
- "-1",
- "-v",
- "-lf",
- lease_file,
- "-pf",
- os.path.join(tmpdir, "dhclient.pid"),
- "eth9",
- "-sf",
- "/bin/true",
- ],
- capture=True,
- ),
- ]
- )
- m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
-
- @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
- @mock.patch("cloudinit.net.dhcp.os.kill")
- @mock.patch("cloudinit.net.dhcp.subp.subp")
- def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid):
+ @mock.patch("cloudinit.util.wait_for_files", return_value=False)
+ def test_dhcp_discovery(self, m_wait, m_subp, m_kill, m_getppid):
"""dhcp_discovery brings up the interface and runs dhclient.
- It also returns the parsed dhcp.leases file generated in the sandbox.
+ It also returns the parsed dhcp.leases file.
"""
m_subp.return_value = ("", "")
- tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, "dhclient.orig")
- script_content = "#!/bin/bash\necho fake-dhclient"
- write_file(dhclient_script, script_content, mode=0o755)
lease_content = dedent(
"""
lease {
@@ -564,14 +466,12 @@ class TestDHCPDiscoveryClean(CiTestCase):
}
"""
)
- lease_file = os.path.join(tmpdir, "dhcp.leases")
- write_file(lease_file, lease_content)
- pid_file = os.path.join(tmpdir, "dhclient.pid")
my_pid = 1
- write_file(pid_file, "%d\n" % my_pid)
m_getppid.return_value = 1 # Indicate that dhclient has daemonized
- with mock.patch("os.access", return_value=False):
+ with mock.patch(
+ "cloudinit.util.load_file", side_effect=["1", lease_content]
+ ):
self.assertCountEqual(
[
{
@@ -581,12 +481,9 @@ class TestDHCPDiscoveryClean(CiTestCase):
"routers": "192.168.2.1",
}
],
- dhcp_discovery(dhclient_script, "eth9", tmpdir),
+ dhcp_discovery("/sbin/dhclient", "eth9"),
)
- # dhclient script got copied
- with open(os.path.join(tmpdir, "dhclient.orig")) as stream:
- self.assertEqual(script_content, stream.read())
- # Interface was brought up before dhclient called from sandbox
+ # Interface was brought up before dhclient called
m_subp.assert_has_calls(
[
mock.call(
@@ -594,13 +491,13 @@ class TestDHCPDiscoveryClean(CiTestCase):
),
mock.call(
[
- os.path.join(tmpdir, "dhclient.orig"),
+ DHCLIENT,
"-1",
"-v",
"-lf",
- lease_file,
+ LEASE_F,
"-pf",
- os.path.join(tmpdir, "dhclient.pid"),
+ PID_F,
"eth9",
"-sf",
"/bin/true",
@@ -614,16 +511,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.subp.subp")
- def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid):
+ @mock.patch("cloudinit.util.wait_for_files")
+ def test_dhcp_output_error_stream(self, m_wait, m_subp, m_kill, m_getppid):
""" "dhcp_log_func is called with the output and error streams of
- dhclinet when the callable is passed."""
+ dhclient when the callable is passed."""
dhclient_err = "FAKE DHCLIENT ERROR"
dhclient_out = "FAKE DHCLIENT OUT"
m_subp.return_value = (dhclient_out, dhclient_err)
tmpdir = self.tmp_dir()
- dhclient_script = os.path.join(tmpdir, "dhclient.orig")
- script_content = "#!/bin/bash\necho fake-dhclient"
- write_file(dhclient_script, script_content, mode=0o755)
lease_content = dedent(
"""
lease {
@@ -645,9 +540,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
self.assertEqual(out, dhclient_out)
self.assertEqual(err, dhclient_err)
- dhcp_discovery(
- dhclient_script, "eth9", tmpdir, dhcp_log_func=dhcp_log_func
- )
+ dhcp_discovery(DHCLIENT, "eth9", dhcp_log_func=dhcp_log_func)
class TestSystemdParseLeases(CiTestCase):
@@ -769,13 +662,13 @@ class TestSystemdParseLeases(CiTestCase):
)
-class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
+class TestEphemeralDhcpNoNetworkSetup(ResponsesTestCase):
@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp):
"""No EphemeralDhcp4 network setup when connectivity_url succeeds."""
url = "http://example.org/index.html"
- httpretty.register_uri(httpretty.GET, url)
+ self.responses.add(responses.GET, url)
with EphemeralDHCPv4(
connectivity_url_data={"url": url},
) as lease:
@@ -798,7 +691,7 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
m_dhcp.return_value = [fake_lease]
m_subp.return_value = ("", "")
- httpretty.register_uri(httpretty.GET, url, body={}, status=404)
+ self.responses.add(responses.GET, url, body=b"", status=404)
with EphemeralDHCPv4(
connectivity_url_data={"url": url},
) as lease:
diff --git a/tests/unittests/net/test_ephemeral.py b/tests/unittests/net/test_ephemeral.py
index d2237faf..0cefd04a 100644
--- a/tests/unittests/net/test_ephemeral.py
+++ b/tests/unittests/net/test_ephemeral.py
@@ -22,16 +22,22 @@ class TestEphemeralIPNetwork:
m_exit_stack,
ipv4,
ipv6,
+ tmpdir,
):
interface = object()
- with EphemeralIPNetwork(interface, ipv4=ipv4, ipv6=ipv6):
+ tmp_dir = str(tmpdir)
+ with EphemeralIPNetwork(
+ interface, ipv4=ipv4, ipv6=ipv6, tmp_dir=tmp_dir
+ ):
pass
expected_call_args_list = []
if ipv4:
expected_call_args_list.append(
mock.call(m_ephemeral_dhcp_v4.return_value)
)
- assert [mock.call(interface)] == m_ephemeral_dhcp_v4.call_args_list
+ assert [
+ mock.call(interface, tmp_dir=tmp_dir)
+ ] == m_ephemeral_dhcp_v4.call_args_list
else:
assert [] == m_ephemeral_dhcp_v4.call_args_list
if ipv6:
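
The assertion style in this hunk compares call_args_list against a literal list of mock.call objects, which verifies argument values (positional and keyword) and call count in one expression:

from unittest import mock

m = mock.Mock()
m("eth0", tmp_dir="/tmp/x")
assert m.call_args_list == [mock.call("eth0", tmp_dir="/tmp/x")]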
diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py
index 53bbb15a..aa5ffc31 100644
--- a/tests/unittests/net/test_init.py
+++ b/tests/unittests/net/test_init.py
@@ -8,15 +8,15 @@ from pathlib import Path
from typing import Optional
from unittest import mock
-import httpretty
import pytest
import requests
+import responses
import cloudinit.net as net
from cloudinit.net.ephemeral import EphemeralIPv4Network, EphemeralIPv6Network
from cloudinit.subp import ProcessExecutionError
from cloudinit.util import ensure_file, write_file
-from tests.unittests.helpers import CiTestCase, HttprettyTestCase
+from tests.unittests.helpers import CiTestCase, ResponsesTestCase
class TestSysDevPath(CiTestCase):
@@ -1210,7 +1210,7 @@ class TestEphemeralIPV6Network:
assert expected_setup_calls == m_subp.call_args_list
-class TestHasURLConnectivity(HttprettyTestCase):
+class TestHasURLConnectivity(ResponsesTestCase):
def setUp(self):
super(TestHasURLConnectivity, self).setUp()
self.url = "http://fake/"
@@ -1225,7 +1225,7 @@ class TestHasURLConnectivity(HttprettyTestCase):
)
def test_true_on_url_connectivity_success(self):
- httpretty.register_uri(httpretty.GET, self.url)
+ self.responses.add(responses.GET, self.url)
self.assertTrue(
net.has_url_connectivity({"url": self.url}),
"Expected True on url connect",
@@ -1241,7 +1241,7 @@ class TestHasURLConnectivity(HttprettyTestCase):
)
def test_true_on_url_connectivity_failure(self):
- httpretty.register_uri(httpretty.GET, self.url, body={}, status=404)
+ self.responses.add(responses.GET, self.url, body=b"", status=404)
self.assertFalse(
net.has_url_connectivity({"url": self.url}),
"Expected False on url fail",
diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py
index a22c5092..2958231b 100644
--- a/tests/unittests/net/test_networkd.py
+++ b/tests/unittests/net/test_networkd.py
@@ -1,7 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from string import Template
from unittest import mock
+import pytest
+
from cloudinit import safeyaml
from cloudinit.net import network_state, networkd
@@ -53,6 +56,149 @@ Domains=foo.local bar.local
"""
+V2_CONFIG_DHCP_YES_OVERRIDES = """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ dhcp4: true
+ dhcp4-overrides:
+ hostname: hal
+ route-metric: 1100
+ send-hostname: false
+ use-dns: false
+ use-domains: false
+ use-hostname: false
+ use-mtu: false
+ use-ntp: false
+ use-routes: false
+ dhcp6: true
+ dhcp6-overrides:
+ use-dns: false
+ use-domains: false
+ use-hostname: false
+ use-ntp: false
+ match:
+ macaddress: "00:11:22:33:44:55"
+ nameservers:
+ addresses: ["8.8.8.8", "2001:4860:4860::8888"]
+"""
+
+V2_CONFIG_DHCP_YES_OVERRIDES_RENDERED = """[DHCPv4]
+Hostname=hal
+RouteMetric=1100
+SendHostname=False
+UseDNS=False
+UseDomains=False
+UseHostname=False
+UseMTU=False
+UseNTP=False
+UseRoutes=False
+
+[DHCPv6]
+UseDNS=False
+UseDomains=False
+UseHostname=False
+UseNTP=False
+
+[Match]
+MACAddress=00:11:22:33:44:55
+Name=eth0
+
+[Network]
+DHCP=yes
+DNS=8.8.8.8 2001:4860:4860::8888
+
+"""
+
+V2_CONFIG_DHCP_DOMAIN_VS_OVERRIDE = Template(
+ """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ dhcp${dhcp_version}domain: true
+ dhcp${dhcp_version}: true
+ dhcp${dhcp_version}-overrides:
+ use-domains: route
+"""
+)
+
+V2_CONFIG_DHCP_OVERRIDES = Template(
+ """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ dhcp${dhcp_version}: true
+ dhcp${dhcp_version}-overrides:
+ ${key}: ${value}
+ match:
+ macaddress: "00:11:22:33:44:55"
+ nameservers:
+ addresses: ["8.8.8.8", "2001:4860:4860::8888"]
+"""
+)
+
+V2_CONFIG_DHCP_OVERRIDES_RENDERED = Template(
+ """[DHCPv${dhcp_version}]
+${key}=${value}
+
+[Match]
+MACAddress=00:11:22:33:44:55
+Name=eth0
+
+[Network]
+DHCP=ipv${dhcp_version}
+DNS=8.8.8.8 2001:4860:4860::8888
+
+"""
+)
+
+V1_CONFIG_MULTI_SUBNETS = """
+network:
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ mac_address: 'ae:98:25:fa:36:9e'
+ subnets:
+ - type: static
+ address: '10.0.0.2'
+ netmask: '255.255.255.255'
+ gateway: '10.0.0.1'
+ - type: static6
+ address: '2a01:4f8:10a:19d2::4/64'
+ gateway: '2a01:4f8:10a:19d2::2'
+ - type: nameserver
+ address:
+ - '100.100.100.100'
+ search:
+ - 'rgrunbla.github.beta.tailscale.net'
+"""
+
+V1_CONFIG_MULTI_SUBNETS_RENDERED = """\
+[Address]
+Address=10.0.0.2/32
+
+[Address]
+Address=2a01:4f8:10a:19d2::4/64
+
+[Match]
+MACAddress=ae:98:25:fa:36:9e
+Name=eth0
+
+[Network]
+DHCP=no
+DNS=100.100.100.100
+Domains=rgrunbla.github.beta.tailscale.net
+
+[Route]
+Gateway=10.0.0.1
+Gateway=2a01:4f8:10a:19d2::2
+
+"""
+
class TestNetworkdRenderState:
def _parse_network_state_from_config(self, config):
@@ -71,5 +217,95 @@ class TestNetworkdRenderState:
assert "ens92" in rendered_content
assert rendered_content["ens92"] == V2_CONFIG_SET_NAME_RENDERED_ETH1
+ def test_networkd_render_dhcp_yes_with_dhcp_overrides(self):
+ with mock.patch("cloudinit.net.get_interfaces_by_mac"):
+ ns = self._parse_network_state_from_config(
+ V2_CONFIG_DHCP_YES_OVERRIDES
+ )
+ renderer = networkd.Renderer()
+ rendered_content = renderer._render_content(ns)
+
+ assert (
+ rendered_content["eth0"] == V2_CONFIG_DHCP_YES_OVERRIDES_RENDERED
+ )
+
+ @pytest.mark.parametrize("dhcp_version", [("4"), ("6")])
+ def test_networkd_render_dhcp_domains_vs_overrides(self, dhcp_version):
+ expected_exception = (
+ f"eth0 has both dhcp{dhcp_version}domain and"
+ f" dhcp{dhcp_version}-overrides.use-domains configured. Use one"
+ )
+ with pytest.raises(Exception, match=expected_exception):
+ with mock.patch("cloudinit.net.get_interfaces_by_mac"):
+ config = V2_CONFIG_DHCP_DOMAIN_VS_OVERRIDE.substitute(
+ dhcp_version=dhcp_version
+ )
+ ns = self._parse_network_state_from_config(config)
+ renderer = networkd.Renderer()
+ renderer._render_content(ns)
+
+ @pytest.mark.parametrize(
+ "dhcp_version,spec_key,spec_value,rendered_key,rendered_value",
+ [
+ ("4", "use-dns", "false", "UseDNS", "False"),
+ ("4", "use-dns", "true", "UseDNS", "True"),
+ ("4", "use-ntp", "false", "UseNTP", "False"),
+ ("4", "use-ntp", "true", "UseNTP", "True"),
+ ("4", "send-hostname", "false", "SendHostname", "False"),
+ ("4", "send-hostname", "true", "SendHostname", "True"),
+ ("4", "use-hostname", "false", "UseHostname", "False"),
+ ("4", "use-hostname", "true", "UseHostname", "True"),
+ ("4", "hostname", "olivaw", "Hostname", "olivaw"),
+ ("4", "route-metric", "12345", "RouteMetric", "12345"),
+ ("4", "use-domains", "false", "UseDomains", "False"),
+ ("4", "use-domains", "true", "UseDomains", "True"),
+ ("4", "use-domains", "route", "UseDomains", "route"),
+ ("4", "use-mtu", "false", "UseMTU", "False"),
+ ("4", "use-mtu", "true", "UseMTU", "True"),
+ ("4", "use-routes", "false", "UseRoutes", "False"),
+ ("4", "use-routes", "true", "UseRoutes", "True"),
+ ("6", "use-dns", "false", "UseDNS", "False"),
+ ("6", "use-dns", "true", "UseDNS", "True"),
+ ("6", "use-ntp", "false", "UseNTP", "False"),
+ ("6", "use-ntp", "true", "UseNTP", "True"),
+ ("6", "use-hostname", "false", "UseHostname", "False"),
+ ("6", "use-hostname", "true", "UseHostname", "True"),
+ ("6", "use-domains", "false", "UseDomains", "False"),
+ ("6", "use-domains", "true", "UseDomains", "True"),
+ ("6", "use-domains", "route", "UseDomains", "route"),
+ ],
+ )
+ def test_networkd_render_dhcp_overrides(
+ self, dhcp_version, spec_key, spec_value, rendered_key, rendered_value
+ ):
+ with mock.patch("cloudinit.net.get_interfaces_by_mac"):
+ ns = self._parse_network_state_from_config(
+ V2_CONFIG_DHCP_OVERRIDES.substitute(
+ dhcp_version=dhcp_version, key=spec_key, value=spec_value
+ )
+ )
+ renderer = networkd.Renderer()
+ rendered_content = renderer._render_content(ns)
+
+ assert rendered_content[
+ "eth0"
+ ] == V2_CONFIG_DHCP_OVERRIDES_RENDERED.substitute(
+ dhcp_version=dhcp_version, key=rendered_key, value=rendered_value
+ )
+
+ def test_networkd_render_v1_multi_subnets(self):
+ """
+ Ensure a device with multiple subnets gets correctly rendered.
+
+ Per systemd-networkd docs, [Address] can only contain a single instance
+ of Address.
+ """
+ with mock.patch("cloudinit.net.get_interfaces_by_mac"):
+ ns = self._parse_network_state_from_config(V1_CONFIG_MULTI_SUBNETS)
+ renderer = networkd.Renderer()
+ rendered_content = renderer._render_content(ns)
+
+ assert rendered_content["eth0"] == V1_CONFIG_MULTI_SUBNETS_RENDERED
+
# vi: ts=4 expandtab
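
The parametrized cases above stamp one YAML skeleton per (version, key, value) tuple using string.Template, which substitutes only ${...} placeholders and leaves the surrounding YAML untouched:

from string import Template

tmpl = Template("dhcp${v}-overrides:\n  ${key}: ${value}\n")
print(tmpl.substitute(v="4", key="use-dns", value="false"))
# dhcp4-overrides:
#   use-dns: false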
diff --git a/tests/unittests/sources/helpers/test_ec2.py b/tests/unittests/sources/helpers/test_ec2.py
index 77e7c7b6..c8f55211 100644
--- a/tests/unittests/sources/helpers/test_ec2.py
+++ b/tests/unittests/sources/helpers/test_ec2.py
@@ -1,18 +1,18 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import httpretty as hp
+import responses
from cloudinit import url_helper as uh
from cloudinit.sources.helpers import ec2
from tests.unittests import helpers
-class TestEc2Util(helpers.HttprettyTestCase):
+class TestEc2Util(helpers.ResponsesTestCase):
VERSION = "latest"
def test_userdata_fetch(self):
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
"http://169.254.169.254/%s/user-data" % (self.VERSION),
body="stuff",
status=200,
@@ -21,8 +21,8 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual("stuff", userdata.decode("utf-8"))
def test_userdata_fetch_fail_not_found(self):
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
"http://169.254.169.254/%s/user-data" % (self.VERSION),
status=404,
)
@@ -30,8 +30,8 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual("", userdata)
def test_userdata_fetch_fail_server_dead(self):
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
"http://169.254.169.254/%s/user-data" % (self.VERSION),
status=500,
)
@@ -39,8 +39,8 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual("", userdata)
def test_userdata_fetch_fail_server_not_found(self):
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
"http://169.254.169.254/%s/user-data" % (self.VERSION),
status=404,
)
@@ -49,26 +49,26 @@ class TestEc2Util(helpers.HttprettyTestCase):
def test_metadata_fetch_no_keys(self):
base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
base_url,
status=200,
body="\n".join(["hostname", "instance-id", "ami-launch-index"]),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "hostname"),
status=200,
body="ec2.fake.host.name.com",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "instance-id"),
status=200,
body="123",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "ami-launch-index"),
status=200,
body="1",
@@ -80,32 +80,32 @@ class TestEc2Util(helpers.HttprettyTestCase):
def test_metadata_fetch_key(self):
base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
base_url,
status=200,
body="\n".join(["hostname", "instance-id", "public-keys/"]),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "hostname"),
status=200,
body="ec2.fake.host.name.com",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "instance-id"),
status=200,
body="123",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "public-keys/"),
status=200,
body="0=my-public-key",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "public-keys/0/openssh-key"),
status=200,
body="ssh-rsa AAAA.....wZEf my-public-key",
@@ -117,38 +117,38 @@ class TestEc2Util(helpers.HttprettyTestCase):
def test_metadata_fetch_with_2_keys(self):
base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
base_url,
status=200,
body="\n".join(["hostname", "instance-id", "public-keys/"]),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "hostname"),
status=200,
body="ec2.fake.host.name.com",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "instance-id"),
status=200,
body="123",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "public-keys/"),
status=200,
body="\n".join(["0=my-public-key", "1=my-other-key"]),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "public-keys/0/openssh-key"),
status=200,
body="ssh-rsa AAAA.....wZEf my-public-key",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "public-keys/1/openssh-key"),
status=200,
body="ssh-rsa AAAA.....wZEf my-other-key",
@@ -160,40 +160,40 @@ class TestEc2Util(helpers.HttprettyTestCase):
def test_metadata_fetch_bdm(self):
base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
base_url,
status=200,
body="\n".join(
["hostname", "instance-id", "block-device-mapping/"]
),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "hostname"),
status=200,
body="ec2.fake.host.name.com",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "instance-id"),
status=200,
body="123",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "block-device-mapping/"),
status=200,
body="\n".join(["ami", "ephemeral0"]),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "block-device-mapping/ami"),
status=200,
body="sdb",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "block-device-mapping/ephemeral0"),
status=200,
body="sdc",
@@ -208,58 +208,58 @@ class TestEc2Util(helpers.HttprettyTestCase):
def test_metadata_no_security_credentials(self):
base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
base_url,
status=200,
body="\n".join(["instance-id", "iam/"]),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "instance-id"),
status=200,
body="i-0123451689abcdef0",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "iam/"),
status=200,
body="\n".join(["info/", "security-credentials/"]),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "iam/info/"),
status=200,
body="LastUpdated",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "iam/info/LastUpdated"),
status=200,
body="2016-10-27T17:29:39Z",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "iam/security-credentials/"),
status=200,
body="ReadOnly/",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(base_url, "iam/security-credentials/ReadOnly/"),
status=200,
body="\n".join(["LastUpdated", "Expiration"]),
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(
base_url, "iam/security-credentials/ReadOnly/LastUpdated"
),
status=200,
body="2016-10-27T17:28:17Z",
)
- hp.register_uri(
- hp.GET,
+ self.responses.add(
+ responses.GET,
uh.combine_url(
base_url, "iam/security-credentials/ReadOnly/Expiration"
),
@@ -273,5 +273,55 @@ class TestEc2Util(helpers.HttprettyTestCase):
self.assertEqual(iam["info"]["LastUpdated"], "2016-10-27T17:29:39Z")
self.assertNotIn("security-credentials", iam)
+ def test_metadata_children_with_invalid_character(self):
+ def _skip_tags(exception):
+ if isinstance(exception, uh.UrlError) and exception.code == 404:
+ if "meta-data/tags/" in exception.url:
+ return True
+ return False
+
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ self.responses.add(
+ responses.GET,
+ base_url,
+ status=200,
+ body="\n".join(["tags/", "ami-launch-index"]),
+ )
+ self.responses.add(
+ responses.GET,
+ uh.combine_url(base_url, "tags/"),
+ status=200,
+ body="\n".join(["test/invalid", "valid"]),
+ )
+ self.responses.add(
+ responses.GET,
+ uh.combine_url(base_url, "tags/valid"),
+ status=200,
+ body="OK",
+ )
+ self.responses.add(
+ responses.GET,
+ uh.combine_url(base_url, "tags/test/invalid"),
+ status=404,
+ )
+ self.responses.add(
+ responses.GET,
+ uh.combine_url(base_url, "ami-launch-index"),
+ status=200,
+ body="1",
+ )
+ md = ec2.get_instance_metadata(
+ self.VERSION,
+ retries=0,
+ timeout=0.1,
+ retrieval_exception_ignore_cb=_skip_tags,
+ )
+ self.assertEqual(md["tags"]["valid"], "OK")
+ self.assertEqual(md["tags"]["test/invalid"], "(skipped)")
+ self.assertEqual(md["ami-launch-index"], "1")
+ md = ec2.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
+ self.assertEqual(len(md), 0)
+
# vi: ts=4 expandtab
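
The new test drives retrieval_exception_ignore_cb: the metadata crawler hands each retrieval failure to the callback, and a truthy return records the leaf as "(skipped)" instead of aborting the walk. The callback is just a predicate over the exception; a tidier version of the one in the test might look like:

def skip_tag_404s(exception):
    # Truthy: mark the node skipped and keep crawling; falsy: fail
    # as before. `code` and `url` are attributes of UrlError.
    return (
        getattr(exception, "code", None) == 404
        and "meta-data/tags/" in getattr(exception, "url", "")
    )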
diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py
index e628dc02..fe4e54b5 100644
--- a/tests/unittests/sources/test_aliyun.py
+++ b/tests/unittests/sources/test_aliyun.py
@@ -4,7 +4,7 @@ import functools
import os
from unittest import mock
-import httpretty
+import responses
from cloudinit import helpers
from cloudinit.sources import DataSourceAliYun as ay
@@ -49,33 +49,7 @@ DEFAULT_USERDATA = """\
hostname: localhost"""
-def register_mock_metaserver(base_url, data):
- def register_helper(register, base_url, body):
- if isinstance(body, str):
- register(base_url, body)
- elif isinstance(body, list):
- register(base_url.rstrip("/"), "\n".join(body) + "\n")
- elif isinstance(body, dict):
- if not body:
- register(
- base_url.rstrip("/") + "/", "not found", status_code=404
- )
- vals = []
- for k, v in body.items():
- if isinstance(v, (str, list)):
- suffix = k.rstrip("/")
- else:
- suffix = k.rstrip("/") + "/"
- vals.append(suffix)
- url = base_url.rstrip("/") + "/" + suffix
- register_helper(register, url, v)
- register(base_url, "\n".join(vals) + "\n")
-
- register = functools.partial(httpretty.register_uri, httpretty.GET)
- register_helper(register, base_url, data)
-
-
-class TestAliYunDatasource(test_helpers.HttprettyTestCase):
+class TestAliYunDatasource(test_helpers.ResponsesTestCase):
def setUp(self):
super(TestAliYunDatasource, self).setUp()
cfg = {"datasource": {"AliYun": {"timeout": "1", "max_wait": "1"}}}
@@ -124,10 +98,35 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
"instance-identity",
)
+ def register_mock_metaserver(self, base_url, data):
+ def register_helper(register, base_url, body):
+ if isinstance(body, str):
+ register(base_url, body)
+ elif isinstance(body, list):
+ register(base_url.rstrip("/"), "\n".join(body) + "\n")
+ elif isinstance(body, dict):
+ if not body:
+ register(
+ base_url.rstrip("/") + "/", "not found", status=404
+ )
+ vals = []
+ for k, v in body.items():
+ if isinstance(v, (str, list)):
+ suffix = k.rstrip("/")
+ else:
+ suffix = k.rstrip("/") + "/"
+ vals.append(suffix)
+ url = base_url.rstrip("/") + "/" + suffix
+ register_helper(register, url, v)
+ register(base_url, "\n".join(vals) + "\n")
+
+ register = functools.partial(self.responses.add, responses.GET)
+ register_helper(register, base_url, data)
+
def regist_default_server(self):
- register_mock_metaserver(self.metadata_url, self.default_metadata)
- register_mock_metaserver(self.userdata_url, self.default_userdata)
- register_mock_metaserver(self.identity_url, self.default_identity)
+ self.register_mock_metaserver(self.metadata_url, self.default_metadata)
+ self.register_mock_metaserver(self.userdata_url, self.default_userdata)
+ self.register_mock_metaserver(self.identity_url, self.default_identity)
def _test_get_data(self):
self.assertEqual(self.ds.metadata, self.default_metadata)
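
Moving register_mock_metaserver onto the class lets it reach self.responses, while functools.partial pins the HTTP method so the recursive helper only supplies url and body. The binding trick in isolation (a toy add() stands in for responses.add):

import functools

registered = []


def add(method, url, body, status=200):
    registered.append((method, url, status))


register = functools.partial(add, "GET")  # method is now fixed
register("http://md/hostname", "host1")
assert registered == [("GET", "http://md/hostname", 200)]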
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index 4eceecf1..24fa061c 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -9,9 +9,9 @@ import stat
import xml.etree.ElementTree as ET
from pathlib import Path
-import httpretty
import pytest
import requests
+import responses
from cloudinit import distros, helpers, subp, url_helper
from cloudinit.net import dhcp
@@ -31,7 +31,7 @@ from cloudinit.version import version_string as vs
from tests.unittests.helpers import (
CiTestCase,
ExitStack,
- HttprettyTestCase,
+ ResponsesTestCase,
mock,
populate_dir,
resourceLocation,
@@ -761,7 +761,7 @@ class TestNetworkConfig:
assert azure_ds.network_config == self.fallback_config
-class TestGetMetadataFromIMDS(HttprettyTestCase):
+class TestGetMetadataFromIMDS(ResponsesTestCase):
with_logs = True
@@ -880,14 +880,21 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
@mock.patch("cloudinit.url_helper.time.sleep")
def test_get_metadata_from_imds_empty_when_no_imds_present(self, m_sleep):
"""Return empty dict when IMDS network metadata is absent."""
- httpretty.register_uri(
- httpretty.GET,
- dsaz.IMDS_URL + "/instance?api-version=2017-12-01",
- body={},
+ # Workaround https://github.com/getsentry/responses/pull/166
+ # url path can be reverted to "/instance?api-version=2019-12-01"
+ response = requests.Response()
+ response.status_code = 404
+ self.responses.add(
+ responses.GET,
+ dsaz.IMDS_URL + "/instance",
+ body=requests.HTTPError("...", response=response),
status=404,
)
- self.assertEqual({}, dsaz.get_metadata_from_imds(retries=2))
+ self.assertEqual(
+ {},
+ dsaz.get_metadata_from_imds(retries=2, api_version="2019-12-01"),
+ )
self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
self.assertIn(
@@ -909,8 +916,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
self.attempt += 1
raise requests.Timeout("Fake connection timeout")
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
dsaz.IMDS_URL + "instance?api-version=2017-12-01",
body=retry_callback,
)
@@ -1134,6 +1141,7 @@ scbus-1 on xpt0 bus 0
if isinstance(distro, str):
distro_cls = distros.fetch(distro)
distro = distro_cls(distro, data.get("sys_cfg", {}), self.paths)
+ distro.get_tmp_exec_path = mock.Mock(side_effect=self.tmp_dir)
dsrc = dsaz.DataSourceAzure(
data.get("sys_cfg", {}), distro=distro, paths=self.paths
)
@@ -3004,7 +3012,9 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
):
"""Wait for nic attach if we do not have a fallback interface.
Skip waiting for additional nics after we have found the primary."""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
lease = {
"interface": "eth9",
"fixed-address": "192.168.2.9",
@@ -3050,7 +3060,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
m_attach.side_effect = ["eth0", "eth1"]
m_imds.reset_mock()
m_imds.side_effect = [{}, md]
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
dsa._wait_for_all_nics_ready()
self.assertEqual(1, m_detach.call_count)
self.assertEqual(2, m_attach.call_count)
@@ -3066,7 +3076,9 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
):
"""Retry polling for network metadata on all failures except timeout
and network unreachable errors"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
lease = {
"interface": "eth9",
"fixed-address": "192.168.2.9",
@@ -3102,7 +3114,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
requests.Timeout("Fake connection timeout")
] * 6 + [requests.ConnectionError("Fake Network Unreachable")] * 6
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
self.assertEqual(False, is_primary)
@@ -3283,7 +3295,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_request.side_effect = fake_timeout_once
report_file = self.tmp_path("report_marker", self.tmp)
m_isfile.return_value = True
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
), mock.patch.object(dsa, "_ephemeral_dhcp_ctx") as m_dhcp_ctx:
@@ -3347,7 +3361,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
}
]
m_media_switch.return_value = None
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
@@ -3379,7 +3395,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
]
m_media_switch.return_value = None
m_report_ready.side_effect = [Exception("fail")]
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
@@ -3637,7 +3655,11 @@ class TestEphemeralNetworking:
azure_ds._setup_ephemeral_networking(iface=iface)
assert mock_ephemeral_dhcp_v4.mock_calls == [
- mock.call(iface=iface, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call(
+ iface=iface,
+ dhcp_log_func=dsaz.dhcp_log_cb,
+ tmp_dir=azure_ds.distro.get_tmp_exec_path(),
+ ),
mock.call().obtain_lease(),
]
assert mock_sleep.mock_calls == []
@@ -3660,7 +3682,11 @@ class TestEphemeralNetworking:
azure_ds._setup_ephemeral_networking(iface=iface)
assert mock_ephemeral_dhcp_v4.mock_calls == [
- mock.call(iface=iface, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call(
+ iface=iface,
+ dhcp_log_func=dsaz.dhcp_log_cb,
+ tmp_dir=azure_ds.distro.get_tmp_exec_path(),
+ ),
mock.call().obtain_lease(),
]
assert mock_sleep.mock_calls == []
@@ -3699,7 +3725,11 @@ class TestEphemeralNetworking:
azure_ds._setup_ephemeral_networking()
assert mock_ephemeral_dhcp_v4.mock_calls == [
- mock.call(iface=None, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call(
+ iface=None,
+ dhcp_log_func=dsaz.dhcp_log_cb,
+ tmp_dir=azure_ds.distro.get_tmp_exec_path(),
+ ),
mock.call().obtain_lease(),
mock.call().obtain_lease(),
]
@@ -3730,7 +3760,11 @@ class TestEphemeralNetworking:
azure_ds._setup_ephemeral_networking()
assert mock_ephemeral_dhcp_v4.mock_calls == [
- mock.call(iface=None, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call(
+ iface=None,
+ dhcp_log_func=dsaz.dhcp_log_cb,
+ tmp_dir=azure_ds.distro.get_tmp_exec_path(),
+ ),
mock.call().obtain_lease(),
mock.call().obtain_lease(),
]
@@ -3765,7 +3799,11 @@ class TestEphemeralNetworking:
assert (
mock_ephemeral_dhcp_v4.mock_calls
== [
- mock.call(iface=None, dhcp_log_func=dsaz.dhcp_log_cb),
+ mock.call(
+ iface=None,
+ dhcp_log_func=dsaz.dhcp_log_cb,
+ tmp_dir=azure_ds.distro.get_tmp_exec_path(),
+ ),
]
+ [mock.call().obtain_lease()] * 11
)
@@ -3999,6 +4037,20 @@ class TestIMDS:
]
+class TestInstanceId:
+ def test_metadata(self, azure_ds, mock_dmi_read_dmi_data):
+ azure_ds.metadata = {"instance-id": "test-id"}
+
+        instance_id = azure_ds.get_instance_id()
+
+        assert instance_id == "test-id"
+
+ def test_fallback(self, azure_ds, mock_dmi_read_dmi_data):
+        instance_id = azure_ds.get_instance_id()
+
+        assert instance_id == "fake-system-uuid"
+
+
class TestProvisioning:
@pytest.fixture(autouse=True)
def provisioning_setup(
@@ -4099,7 +4151,11 @@ class TestProvisioning:
mock.call(timeout_minutes=20)
]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
- mock.call(None, dsaz.dhcp_log_cb)
+ mock.call(
+ None,
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ )
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
assert self.azure_ds._is_ephemeral_networking_up() is False
@@ -4176,8 +4232,16 @@ class TestProvisioning:
mock.call(timeout_minutes=5),
]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
- mock.call(None, dsaz.dhcp_log_cb),
- mock.call(None, dsaz.dhcp_log_cb),
+ mock.call(
+ None,
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ ),
+ mock.call(
+ None,
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ ),
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
assert self.azure_ds._is_ephemeral_networking_up() is False
@@ -4280,8 +4344,16 @@ class TestProvisioning:
mock.call(iface="ethAttached1", timeout_minutes=20),
]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
- mock.call(None, dsaz.dhcp_log_cb),
- mock.call("ethAttached1", dsaz.dhcp_log_cb),
+ mock.call(
+ None,
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ ),
+ mock.call(
+ "ethAttached1",
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ ),
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
assert self.azure_ds._is_ephemeral_networking_up() is False
@@ -4420,8 +4492,16 @@ class TestProvisioning:
mock.call(iface="ethAttached1", timeout_minutes=20),
]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
- mock.call(None, dsaz.dhcp_log_cb),
- mock.call("ethAttached1", dsaz.dhcp_log_cb),
+ mock.call(
+ None,
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ ),
+ mock.call(
+ "ethAttached1",
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ ),
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
assert self.azure_ds._is_ephemeral_networking_up() is False
@@ -4508,7 +4588,11 @@ class TestProvisioning:
mock.call(timeout_minutes=20),
]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
- mock.call(None, dsaz.dhcp_log_cb),
+ mock.call(
+ None,
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ ),
]
# Verify IMDS metadata.
@@ -4568,7 +4652,11 @@ class TestProvisioning:
mock.call(timeout_minutes=20)
]
assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
- mock.call(None, dsaz.dhcp_log_cb)
+ mock.call(
+ None,
+ dsaz.dhcp_log_cb,
+ self.azure_ds.distro.get_tmp_exec_path(),
+ )
]
assert self.azure_ds._wireserver_endpoint == "10.11.12.13"
assert self.azure_ds._is_ephemeral_networking_up() is False
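
A recurring change in this file: the distro argument becomes a MagicMock whose get_tmp_exec_path is pointed at the test's tmp_dir helper, so code under test calling distro.get_tmp_exec_path() receives a real writable path rather than another Mock. The substitution in miniature:

import os
import tempfile
from unittest import mock

distro = mock.MagicMock()
# Any callable can replace an auto-created Mock attribute.
distro.get_tmp_exec_path = tempfile.mkdtemp
assert os.path.isdir(distro.get_tmp_exec_path())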
diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
index ff912bef..0a41fedf 100644
--- a/tests/unittests/sources/test_azure_helper.py
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -15,8 +15,7 @@ from cloudinit.sources.helpers import azure as azure_helper
from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
from cloudinit.util import load_file
from tests.unittests.helpers import CiTestCase, ExitStack, mock
-
-from .test_azure import construct_ovf_env
+from tests.unittests.sources.test_azure import construct_ovf_env
GOAL_STATE_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
diff --git a/tests/unittests/sources/test_bigstep.py b/tests/unittests/sources/test_bigstep.py
index 148cfa0b..6406a362 100644
--- a/tests/unittests/sources/test_bigstep.py
+++ b/tests/unittests/sources/test_bigstep.py
@@ -1,8 +1,8 @@
import json
import os
-import httpretty
import pytest
+import responses
from cloudinit import helpers
from cloudinit.sources import DataSourceBigstep as bigstep
@@ -21,11 +21,11 @@ METADATA_BODY = json.dumps(
class TestBigstep:
- @httpretty.activate
@pytest.mark.parametrize("custom_paths", [False, True])
@mock.patch(M_PATH + "util.load_file", return_value=IMDS_URL)
+ @responses.activate
def test_get_data_honor_cloud_dir(self, m_load_file, custom_paths, tmpdir):
- httpretty.register_uri(httpretty.GET, IMDS_URL, body=METADATA_BODY)
+ responses.add(responses.GET, IMDS_URL, body=METADATA_BODY)
paths = {}
url_file = "/var/lib/cloud/data/seed/bigstep/url"
diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py
index a78eaccb..daeefa8a 100644
--- a/tests/unittests/sources/test_common.py
+++ b/tests/unittests/sources/test_common.py
@@ -19,6 +19,7 @@ from cloudinit.sources import DataSourceLXD as LXD
from cloudinit.sources import DataSourceMAAS as MAAS
from cloudinit.sources import DataSourceNoCloud as NoCloud
from cloudinit.sources import DataSourceNone as DSNone
+from cloudinit.sources import DataSourceNWCS as NWCS
from cloudinit.sources import DataSourceOpenNebula as OpenNebula
from cloudinit.sources import DataSourceOpenStack as OpenStack
from cloudinit.sources import DataSourceOracle as Oracle
@@ -52,6 +53,7 @@ DEFAULT_LOCAL = [
Scaleway.DataSourceScaleway,
UpCloud.DataSourceUpCloudLocal,
VMware.DataSourceVMware,
+ NWCS.DataSourceNWCS,
]
DEFAULT_NETWORK = [
diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py
index c12613ec..4c832da7 100644
--- a/tests/unittests/sources/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -259,8 +259,8 @@ def _register_ssh_keys(rfunc, base_url, keys_data):
rfunc(burl + "/%s/openssh-key/" % name, val)
-def register_mock_metaserver(base_url, data):
- """Register with httpretty a ec2 metadata like service serving 'data'.
+def register_mock_metaserver(base_url, data, responses_mock=None):
+ """Register with responses a ec2 metadata like service serving 'data'.
If given a dictionary, it will populate urls under base_url for
that dictionary. For example, input of
@@ -272,6 +272,7 @@ def register_mock_metaserver(base_url, data):
base_url/mac with 00:16:3e:00:00:00
In the index, references to lists or dictionaries have a trailing /.
"""
+ responses_mock = responses_mock or responses
def register_helper(register, base_url, body):
if not isinstance(base_url, str):
@@ -304,12 +305,12 @@ def register_mock_metaserver(base_url, data):
url, body = argc
method = responses.PUT if ec2.API_TOKEN_ROUTE in url else responses.GET
status = kwargs.get("status", 200)
- return responses.add(method, url, body, status=status)
+ return responses_mock.add(method, url, body, status=status)
register_helper(myreg, base_url, data)
-class TestEc2(test_helpers.HttprettyTestCase):
+class TestEc2(test_helpers.ResponsesTestCase):
with_logs = True
maxDiff = None
@@ -336,7 +337,8 @@ class TestEc2(test_helpers.HttprettyTestCase):
def _setup_ds(self, sys_cfg, platform_data, md, md_version=None):
self.uris = []
- distro = {}
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
paths = helpers.Paths({"run_dir": self.tmp})
if sys_cfg is None:
sys_cfg = {}
@@ -363,37 +365,46 @@ class TestEc2(test_helpers.HttprettyTestCase):
ds.min_metadata_version
] + ds.extended_metadata_versions
token_url = self.data_url("latest", data_item="api/token")
- register_mock_metaserver(token_url, "API-TOKEN")
+ register_mock_metaserver(token_url, "API-TOKEN", self.responses)
for version in all_versions:
metadata_url = self.data_url(version) + "/"
if version == md_version:
# Register all metadata for desired version
register_mock_metaserver(
- metadata_url, md.get("md", DEFAULT_METADATA)
+ metadata_url,
+ md.get("md", DEFAULT_METADATA),
+ self.responses,
)
userdata_url = self.data_url(
version, data_item="user-data"
)
- register_mock_metaserver(userdata_url, md.get("ud", ""))
+ register_mock_metaserver(
+ userdata_url, md.get("ud", ""), self.responses
+ )
identity_url = self.data_url(
version, data_item="dynamic/instance-identity"
)
register_mock_metaserver(
- identity_url, md.get("id", DYNAMIC_METADATA)
+ identity_url,
+ md.get("id", DYNAMIC_METADATA),
+ self.responses,
)
else:
instance_id_url = metadata_url + "instance-id"
if version == ds.min_metadata_version:
# Add min_metadata_version service availability check
register_mock_metaserver(
- instance_id_url, DEFAULT_METADATA["instance-id"]
+ instance_id_url,
+ DEFAULT_METADATA["instance-id"],
+ self.responses,
)
else:
# Register 404s for all unrequested extended versions
- register_mock_metaserver(instance_id_url, None)
+ register_mock_metaserver(
+ instance_id_url, None, self.responses
+ )
return ds
- @responses.activate
def test_network_config_property_returns_version_2_network_data(self):
"""network_config property returns network version 2 for metadata"""
ds = self._setup_ds(
@@ -428,7 +439,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
- @responses.activate
def test_network_config_property_set_dhcp4(self):
"""network_config property configures dhcp4 on nics with local-ipv4s.
@@ -467,7 +477,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
- @responses.activate
def test_network_config_property_secondary_private_ips(self):
"""network_config property configures any secondary ipv4 addresses.
@@ -511,7 +520,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
- @responses.activate
def test_network_config_property_is_cached_in_datasource(self):
"""network_config property is cached in DataSourceEc2."""
ds = self._setup_ds(
@@ -523,7 +531,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertEqual({"cached": "data"}, ds.network_config)
@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
- @responses.activate
def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
"""Refresh the network_config Ec2 cache if network key is absent.
@@ -540,24 +547,26 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertTrue(ds.get_data())
# Workaround https://github.com/getsentry/responses/issues/212
- if hasattr(responses.mock, "_urls"):
+ if hasattr(self.responses, "_urls"):
# Can be removed when Bionic is EOL
- for index, url in enumerate(responses.mock._urls):
+ for index, url in enumerate(self.responses._urls):
if url["url"].startswith(
"http://169.254.169.254/2009-04-04/meta-data/"
):
- del responses.mock._urls[index]
- elif hasattr(responses.mock, "_matches"):
+ del self.responses._urls[index]
+ elif hasattr(self.responses, "_matches"):
# Can be removed when Focal is EOL
- for index, response in enumerate(responses.mock._matches):
+ for index, response in enumerate(self.responses._matches):
if response.url.startswith(
"http://169.254.169.254/2009-04-04/meta-data/"
):
- del responses.mock._matches[index]
+ del self.responses._matches[index]
# Provide new revision of metadata that contains network data
register_mock_metaserver(
- "http://169.254.169.254/2009-04-04/meta-data/", DEFAULT_METADATA
+ "http://169.254.169.254/2009-04-04/meta-data/",
+ DEFAULT_METADATA,
+ self.responses,
)
mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac"
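
The hasattr probing above exists because older responses releases keep the
registry in RequestsMock._urls (a list of dicts) while newer ones use
RequestsMock._matches (a list of response objects). A hedged sketch of the
same workaround factored into a helper; rebuilding the list also avoids
deleting from a sequence while iterating over it, which the in-test loops do:

    def deregister_prefix(responses_mock, prefix):
        # Drop any registered reply whose URL starts with `prefix`,
        # handling both old and new `responses` internals.
        if hasattr(responses_mock, "_urls"):
            responses_mock._urls = [
                u for u in responses_mock._urls
                if not u["url"].startswith(prefix)
            ]
        elif hasattr(responses_mock, "_matches"):
            responses_mock._matches = [
                r for r in responses_mock._matches
                if not r.url.startswith(prefix)
            ]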
@@ -583,7 +592,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
}
self.assertEqual(expected, ds.network_config)
- @responses.activate
def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
"""get_instance-id gets DataSourceEc2Local.identity if not present.
@@ -605,24 +613,26 @@ class TestEc2(test_helpers.HttprettyTestCase):
register_mock_metaserver(
"http://[fd00:ec2::254]/{0}/meta-data/instance-id".format(ver),
None,
+ self.responses,
)
ds.metadata_address = "http://[fd00:ec2::254]"
register_mock_metaserver(
"{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]),
DEFAULT_METADATA,
+ self.responses,
)
# Register dynamic/instance-identity document which we now read.
register_mock_metaserver(
"{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]),
DYNAMIC_METADATA,
+ self.responses,
)
ds._cloud_name = ec2.CloudNames.AWS
# Setup cached metadata on the Datasource
ds.metadata = DEFAULT_METADATA
self.assertEqual("my-identity-id", ds.get_instance_id())
- @responses.activate
def test_classic_instance_true(self):
"""If no vpc-id in metadata, is_classic_instance must return true."""
md_copy = copy.deepcopy(DEFAULT_METADATA)
@@ -639,7 +649,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertTrue(ds.get_data())
self.assertTrue(ds.is_classic_instance())
- @responses.activate
def test_classic_instance_false(self):
"""If vpc-id in metadata, is_classic_instance must return false."""
ds = self._setup_ds(
@@ -650,7 +659,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertTrue(ds.get_data())
self.assertFalse(ds.is_classic_instance())
- @responses.activate
def test_aws_inaccessible_imds_service_fails_with_retries(self):
"""Inaccessibility of http://169.254.169.254 are retried."""
ds = self._setup_ds(
@@ -697,7 +705,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
for readurl_call in m_readurl.call_args_list:
self.assertIn("latest/api/token", readurl_call[0][0])
- @responses.activate
def test_aws_token_403_fails_without_retries(self):
"""Verify that 403s fetching AWS tokens are not retried."""
ds = self._setup_ds(
@@ -707,7 +714,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
)
token_url = self.data_url("latest", data_item="api/token")
- responses.add(responses.PUT, token_url, status=403)
+ self.responses.add(responses.PUT, token_url, status=403)
self.assertFalse(ds.get_data())
# Just one /latest/api/token request
logs = self.logs.getvalue()
@@ -719,7 +726,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
for log in expected_logs:
self.assertIn(log, logs)
- @responses.activate
def test_aws_token_redacted(self):
"""Verify that aws tokens are redacted when logged."""
ds = self._setup_ds(
@@ -738,7 +744,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertEqual(83, len(logs_with_redacted))
self.assertEqual(0, len(logs_with_token))
- @responses.activate
@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_valid_platform_with_strict_true(self, m_dhcp):
"""Valid platform data should return true with strict_id true."""
@@ -754,7 +759,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertEqual("ec2", ds.platform_type)
self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform)
- @responses.activate
def test_valid_platform_with_strict_false(self):
"""Valid platform data should return true with strict_id false."""
ds = self._setup_ds(
@@ -765,7 +769,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
- @responses.activate
def test_unknown_platform_with_strict_true(self):
"""Unknown platform data with strict_id true should return False."""
uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
@@ -777,7 +780,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertFalse(ret)
- @responses.activate
def test_unknown_platform_with_strict_false(self):
"""Unknown platform data with strict_id false should return True."""
uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
@@ -789,7 +791,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
- @responses.activate
def test_ec2_local_returns_false_on_non_aws(self):
"""DataSourceEc2Local returns False when platform is not AWS."""
self.datasource = ec2.DataSourceEc2Local
@@ -805,19 +806,18 @@ class TestEc2(test_helpers.HttprettyTestCase):
]
for attr_name in platform_attrs:
platform_name = getattr(ec2.CloudNames, attr_name)
- if platform_name != "aws":
+ if platform_name not in ["aws", "outscale"]:
ds._cloud_name = platform_name
ret = ds.get_data()
self.assertEqual("ec2", ds.platform_type)
self.assertFalse(ret)
message = (
- "Local Ec2 mode only supported on ('aws',),"
+ "Local Ec2 mode only supported on ('aws', 'outscale'),"
" not {0}".format(platform_name)
)
self.assertIn(message, self.logs.getvalue())
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
- @responses.activate
def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd):
"""DataSourceEc2Local returns False on BSD.
@@ -842,7 +842,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
@mock.patch("cloudinit.net.find_fallback_nic")
@mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
- @responses.activate
def test_ec2_local_performs_dhcp_on_non_bsd(
self, m_is_bsd, m_dhcp, m_fallback_nic, m_net4, m_net6
):
@@ -873,7 +872,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
- m_dhcp.assert_called_once_with("eth9", None)
+ m_dhcp.assert_called_once_with("eth9", None, mock.ANY)
m_net4.assert_called_once_with(
broadcast="192.168.2.255",
interface="eth9",
@@ -884,7 +883,6 @@ class TestEc2(test_helpers.HttprettyTestCase):
)
self.assertIn("Crawl of metadata service ", self.logs.getvalue())
- @responses.activate
def test_get_instance_tags(self):
ds = self._setup_ds(
platform_data=self.valid_platform_data,
@@ -1176,6 +1174,7 @@ class TesIdentifyPlatform(test_helpers.CiTestCase):
"uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2",
"uuid_source": "dmi",
"vendor": "tothecloud",
+ "product_name": "cloudproduct",
}
unspecial.update(**kwargs)
return unspecial
@@ -1208,5 +1207,33 @@ class TesIdentifyPlatform(test_helpers.CiTestCase):
m_collect.return_value = self.collmock(vendor="e24cloudyday")
self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+ # Outscale
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
+ def test_identify_outscale(self, m_collect):
+ """Should return true if the dmi product data has expected value."""
+ m_collect.return_value = self.collmock(
+ vendor="3DS Outscale".lower(),
+ product_name="3DS Outscale VM".lower(),
+ )
+ self.assertEqual(ec2.CloudNames.OUTSCALE, ec2.identify_platform())
+
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
+ def test_false_on_wrong_sys_vendor(self, m_collect):
+ """Should return false on empty value returned."""
+ m_collect.return_value = self.collmock(
+ vendor="Not 3DS Outscale".lower(),
+ product_name="3DS Outscale VM".lower(),
+ )
+ self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
+ def test_false_on_wrong_product_name(self, m_collect):
+ """Should return false on an unrelated string."""
+ m_collect.return_value = self.collmock(
+ vendor="3DS Outscale".lower(),
+ product_name="Not 3DS Outscale VM".lower(),
+ )
+ self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
# vi: ts=4 expandtab
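
The three Outscale cases above pivot on two DMI fields, and both must match
for the platform to be identified. A hedged sketch of the rule they encode
(the helper name is illustrative; only the DMI values come from the tests):

    def is_outscale(vendor: str, product_name: str) -> bool:
        # Either field failing the comparison falls through to UNKNOWN,
        # as the two negative tests assert.
        return (
            vendor.lower() == "3ds outscale"
            and product_name.lower() == "3ds outscale vm"
        )

    assert is_outscale("3DS Outscale", "3DS Outscale VM")
    assert not is_outscale("Not 3DS Outscale", "3DS Outscale VM")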
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
index 591256d8..c71889f9 100644
--- a/tests/unittests/sources/test_exoscale.py
+++ b/tests/unittests/sources/test_exoscale.py
@@ -4,8 +4,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-import httpretty
import requests
+import responses
from cloudinit import helpers, util
from cloudinit.sources.DataSourceExoscale import (
@@ -16,7 +16,7 @@ from cloudinit.sources.DataSourceExoscale import (
get_password,
read_metadata,
)
-from tests.unittests.helpers import HttprettyTestCase, mock
+from tests.unittests.helpers import ResponsesTestCase, mock
TEST_PASSWORD_URL = "{}:{}/{}/".format(
METADATA_URL, PASSWORD_SERVER_PORT, API_VERSION
@@ -27,8 +27,7 @@ TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, API_VERSION)
TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, API_VERSION)
-@httpretty.activate
-class TestDatasourceExoscale(HttprettyTestCase):
+class TestDatasourceExoscale(ResponsesTestCase):
def setUp(self):
super(TestDatasourceExoscale, self).setUp()
self.tmp = self.tmp_dir()
@@ -39,23 +38,23 @@ class TestDatasourceExoscale(HttprettyTestCase):
def test_password_saved(self):
"""The password is not set when it is not found
in the metadata service."""
- httpretty.register_uri(
- httpretty.GET, self.password_url, body="saved_password"
+ self.responses.add(
+ responses.GET, self.password_url, body="saved_password"
)
self.assertFalse(get_password())
def test_password_empty(self):
"""No password is set if the metadata service returns
an empty string."""
- httpretty.register_uri(httpretty.GET, self.password_url, body="")
+ self.responses.add(responses.GET, self.password_url, body="")
self.assertFalse(get_password())
def test_password(self):
"""The password is set to what is found in the metadata
service."""
expected_password = "p@ssw0rd"
- httpretty.register_uri(
- httpretty.GET, self.password_url, body=expected_password
+ self.responses.add(
+ responses.GET, self.password_url, body=expected_password
)
password = get_password()
self.assertEqual(expected_password, password)
@@ -82,24 +81,24 @@ class TestDatasourceExoscale(HttprettyTestCase):
expected_id = "12345"
expected_hostname = "myname"
expected_userdata = "#cloud-config"
- httpretty.register_uri(
- httpretty.GET, self.userdata_url, body=expected_userdata
+ self.responses.add(
+ responses.GET, self.userdata_url, body=expected_userdata
)
- httpretty.register_uri(
- httpretty.GET, self.password_url, body=expected_password
+ self.responses.add(
+ responses.GET, self.password_url, body=expected_password
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
self.metadata_url,
body="instance-id\nlocal-hostname",
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
"{}local-hostname".format(self.metadata_url),
body=expected_hostname,
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
@@ -130,24 +129,24 @@ class TestDatasourceExoscale(HttprettyTestCase):
expected_id = "12345"
expected_hostname = "myname"
expected_userdata = "#cloud-config"
- httpretty.register_uri(
- httpretty.GET, self.userdata_url, body=expected_userdata
+ self.responses.add(
+ responses.GET, self.userdata_url, body=expected_userdata
)
- httpretty.register_uri(
- httpretty.GET, self.password_url, body=expected_answer
+ self.responses.add(
+ responses.GET, self.password_url, body=expected_answer
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
self.metadata_url,
body="instance-id\nlocal-hostname",
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
"{}local-hostname".format(self.metadata_url),
body=expected_hostname,
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
@@ -169,24 +168,24 @@ class TestDatasourceExoscale(HttprettyTestCase):
expected_id = "12345"
expected_hostname = "myname"
expected_userdata = "#cloud-config"
- httpretty.register_uri(
- httpretty.GET, self.userdata_url, body=expected_userdata
+ self.responses.add(
+ responses.GET, self.userdata_url, body=expected_userdata
)
- httpretty.register_uri(
- httpretty.GET, self.password_url, body=expected_answer
+ self.responses.add(
+ responses.GET, self.password_url, body=expected_answer
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
self.metadata_url,
body="instance-id\nlocal-hostname",
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
"{}local-hostname".format(self.metadata_url),
body=expected_hostname,
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
@@ -207,21 +206,21 @@ class TestDatasourceExoscale(HttprettyTestCase):
expected_userdata = "#cloud-config"
m_password.side_effect = requests.Timeout("Fake Connection Timeout")
- httpretty.register_uri(
- httpretty.GET, self.userdata_url, body=expected_userdata
+ self.responses.add(
+ responses.GET, self.userdata_url, body=expected_userdata
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
self.metadata_url,
body="instance-id\nlocal-hostname",
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
"{}local-hostname".format(self.metadata_url),
body=expected_hostname,
)
- httpretty.register_uri(
- httpretty.GET,
+ self.responses.add(
+ responses.GET,
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
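
With the class-level @httpretty.activate decorator gone, each Exoscale test
registers its stubs on the per-test mock that ResponsesTestCase exposes as
self.responses. A pared-down stand-in for that base class, assuming the
standard responses.RequestsMock start/stop lifecycle:

    import unittest

    import responses

    class ResponsesTestCaseSketch(unittest.TestCase):
        def setUp(self):
            # One RequestsMock per test; torn down automatically.
            self.responses = responses.RequestsMock(
                assert_all_requests_are_fired=False
            )
            self.responses.start()
            self.addCleanup(self.responses.stop)

    # Usage inside a test method:
    #     self.responses.add(responses.GET, url, body="...")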
diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py
index 1ce0c6ec..1a89563c 100644
--- a/tests/unittests/sources/test_gce.py
+++ b/tests/unittests/sources/test_gce.py
@@ -11,12 +11,14 @@ from base64 import b64decode, b64encode
from unittest import mock
from urllib.parse import urlparse
-import httpretty
+import responses
from cloudinit import distros, helpers, settings
from cloudinit.sources import DataSourceGCE
from tests.unittests import helpers as test_helpers
+M_PATH = "cloudinit.sources.DataSourceGCE."
+
GCE_META = {
"instance/id": "123",
"instance/zone": "foo/bar",
@@ -58,32 +60,7 @@ GUEST_ATTRIBUTES_URL = (
)
-def _set_mock_metadata(gce_meta=None):
- if gce_meta is None:
- gce_meta = GCE_META
-
- def _request_callback(method, uri, headers):
- url_path = urlparse(uri).path
- if url_path.startswith("/computeMetadata/v1/"):
- path = url_path.split("/computeMetadata/v1/")[1:][0]
- recursive = path.endswith("/")
- path = path.rstrip("/")
- else:
- path = None
- if path in gce_meta:
- response = gce_meta.get(path)
- if recursive:
- response = json.dumps(response)
- return (200, headers, response)
- else:
- return (404, headers, "")
-
- # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
- httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
-
-
-@httpretty.activate
-class TestDataSourceGCE(test_helpers.HttprettyTestCase):
+class TestDataSourceGCE(test_helpers.ResponsesTestCase):
def _make_distro(self, dtype, def_user=None):
cfg = dict(settings.CFG_BUILTIN)
cfg["system_info"]["distro"] = dtype
@@ -99,30 +76,63 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.ds = DataSourceGCE.DataSourceGCE(
settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
)
+
ppatch = self.m_platform_reports_gce = mock.patch(
- "cloudinit.sources.DataSourceGCE.platform_reports_gce"
+ M_PATH + "platform_reports_gce"
)
self.m_platform_reports_gce = ppatch.start()
self.m_platform_reports_gce.return_value = True
self.addCleanup(ppatch.stop)
+
+ pppatch = self.m_is_resolvable_url = mock.patch(
+ M_PATH + "util.is_resolvable_url", return_value=True
+ )
+ self.m_is_resolvable_url = pppatch.start()
+ self.addCleanup(pppatch.stop)
+
self.add_patch("time.sleep", "m_sleep") # just to speed up tests
super(TestDataSourceGCE, self).setUp()
+ def _set_mock_metadata(self, gce_meta=None, *, check_headers=None):
+ if gce_meta is None:
+ gce_meta = GCE_META
+
+ def _request_callback(request):
+ url_path = urlparse(request.url).path
+ if url_path.startswith("/computeMetadata/v1/"):
+ path = url_path.split("/computeMetadata/v1/")[1:][0]
+ recursive = path.endswith("/")
+ path = path.rstrip("/")
+ else:
+ path = None
+ if path in gce_meta:
+ response = gce_meta.get(path)
+ if recursive:
+ response = json.dumps(response)
+ if check_headers is not None:
+ for k in check_headers.keys():
+ self.assertEqual(check_headers[k], request.headers[k])
+ return (200, request.headers, response)
+ else:
+ return (404, request.headers, "")
+
+ self.responses.add_callback(
+ responses.GET,
+ MD_URL_RE,
+ callback=_request_callback,
+ )
+
def test_connection(self):
- _set_mock_metadata()
+ self._set_mock_metadata(check_headers=HEADERS)
success = self.ds.get_data()
self.assertTrue(success)
- req_header = httpretty.last_request().headers
- for header_name, expected_value in HEADERS.items():
- self.assertEqual(expected_value, req_header.get(header_name))
-
def test_metadata(self):
# UnicodeDecodeError if set to ds.userdata instead of userdata_raw
meta = GCE_META.copy()
meta["instance/attributes/user-data"] = b"/bin/echo \xff\n"
- _set_mock_metadata()
+ self._set_mock_metadata()
self.ds.get_data()
shostname = GCE_META.get("instance/hostname").split(".")[0]
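
The rewritten _set_mock_metadata above uses the callback form of responses:
add_callback routes every URL matching a regex to a function that receives
the prepared request and returns a (status, headers, body) tuple. A minimal
self-contained sketch of that pattern (URL and body are illustrative):

    import re

    import requests
    import responses

    @responses.activate
    def test_callback_route():
        def _cb(request):
            # Inspect request.url / request.headers here if needed.
            return (200, {}, "pong")

        responses.add_callback(
            responses.GET,
            re.compile(r"http://metadata\.internal/.*"),
            callback=_cb,
        )
        assert requests.get("http://metadata.internal/ping").text == "pong"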
@@ -137,9 +147,9 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.ds.get_userdata_raw(),
)
- # test partial metadata (missing user-data in particular)
def test_metadata_partial(self):
- _set_mock_metadata(GCE_META_PARTIAL)
+ """test partial metadata (missing user-data in particular)"""
+ self._set_mock_metadata(GCE_META_PARTIAL)
self.ds.get_data()
self.assertEqual(
@@ -151,7 +161,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
def test_userdata_no_encoding(self):
"""check that user-data is read."""
- _set_mock_metadata(GCE_USER_DATA_TEXT)
+ self._set_mock_metadata(GCE_USER_DATA_TEXT)
self.ds.get_data()
self.assertEqual(
GCE_USER_DATA_TEXT["instance/attributes"]["user-data"].encode(),
@@ -160,7 +170,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
def test_metadata_encoding(self):
"""user-data is base64 encoded if user-data-encoding is 'base64'."""
- _set_mock_metadata(GCE_META_ENCODING)
+ self._set_mock_metadata(GCE_META_ENCODING)
self.ds.get_data()
instance_data = GCE_META_ENCODING.get("instance/attributes")
@@ -175,12 +185,12 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
]:
meta = GCE_META_PARTIAL.copy()
del meta[required_key]
- _set_mock_metadata(meta)
+ self._set_mock_metadata(meta)
self.assertEqual(False, self.ds.get_data())
- httpretty.reset()
+ self.responses.reset()
def test_no_ssh_keys_metadata(self):
- _set_mock_metadata()
+ self._set_mock_metadata()
self.ds.get_data()
self.assertEqual([], self.ds.get_public_ssh_keys())
@@ -215,13 +225,13 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
meta["project/attributes"] = project_attributes
meta["instance/attributes"] = instance_attributes
- _set_mock_metadata(meta)
+ self._set_mock_metadata(meta)
self.ds.get_data()
expected = [valid_key.format(key) for key in range(3)]
self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
- @mock.patch("cloudinit.sources.DataSourceGCE.ug_util")
+ @mock.patch(M_PATH + "ug_util")
def test_default_user_ssh_keys(self, mock_ug_util):
mock_ug_util.normalize_users_groups.return_value = None, None
mock_ug_util.extract_default.return_value = "ubuntu", None
@@ -261,7 +271,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
meta["project/attributes"] = project_attributes
meta["instance/attributes"] = instance_attributes
- _set_mock_metadata(meta)
+ self._set_mock_metadata(meta)
ubuntu_ds.get_data()
expected = [valid_key.format(key) for key in range(3)]
@@ -284,7 +294,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
meta["project/attributes"] = project_attributes
meta["instance/attributes"] = instance_attributes
- _set_mock_metadata(meta)
+ self._set_mock_metadata(meta)
self.ds.get_data()
expected = [valid_key.format(key) for key in range(2)]
@@ -306,14 +316,14 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
meta["project/attributes"] = project_attributes
meta["instance/attributes"] = instance_attributes
- _set_mock_metadata(meta)
+ self._set_mock_metadata(meta)
self.ds.get_data()
expected = [valid_key.format(0)]
self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
def test_only_last_part_of_zone_used_for_availability_zone(self):
- _set_mock_metadata()
+ self._set_mock_metadata()
r = self.ds.get_data()
self.assertEqual(True, r)
self.assertEqual("bar", self.ds.availability_zone)
@@ -388,26 +398,26 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
@mock.patch(
- "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+ M_PATH + "EphemeralDHCPv4",
autospec=True,
)
- @mock.patch(
- "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface"
- )
+ @mock.patch(M_PATH + "DataSourceGCELocal.fallback_interface")
def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp):
- _set_mock_metadata()
+ self._set_mock_metadata()
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
ds = DataSourceGCE.DataSourceGCELocal(
- sys_cfg={}, distro=None, paths=None
+ sys_cfg={}, distro=distro, paths=None
)
ds._get_data()
assert m_dhcp.call_count == 1
@mock.patch(
- "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+ M_PATH + "EphemeralDHCPv4",
autospec=True,
)
def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp):
- _set_mock_metadata()
+ self._set_mock_metadata()
ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None)
ds._get_data()
assert m_dhcp.call_count == 0
diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py
index 193b7e42..dfaa472e 100644
--- a/tests/unittests/sources/test_hetzner.py
+++ b/tests/unittests/sources/test_hetzner.py
@@ -66,8 +66,10 @@ class TestDataSourceHetzner(CiTestCase):
self.tmp = self.tmp_dir()
def get_ds(self):
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
ds = DataSourceHetzner.DataSourceHetzner(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
return ds
@@ -112,6 +114,7 @@ class TestDataSourceHetzner(CiTestCase):
connectivity_url_data={
"url": "http://169.254.169.254/hetzner/v1/metadata/instance-id"
},
+ tmp_dir=mock.ANY,
)
self.assertTrue(m_readmd.called)
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
index 52f6cbfc..a81c33a2 100644
--- a/tests/unittests/sources/test_init.py
+++ b/tests/unittests/sources/test_init.py
@@ -10,8 +10,6 @@ from cloudinit.event import EventScope, EventType
from cloudinit.helpers import Paths
from cloudinit.sources import (
EXPERIMENTAL_TEXT,
- INSTANCE_JSON_FILE,
- INSTANCE_JSON_SENSITIVE_FILE,
METADATA_UNKNOWN,
REDACT_SENSITIVE_VALUE,
UNSET,
@@ -356,16 +354,17 @@ class TestDataSource(CiTestCase):
def test_get_data_does_not_write_instance_data_on_failure(self):
"""get_data does not write INSTANCE_JSON_FILE on get_data False."""
tmp = self.tmp_dir()
+ paths = Paths({"run_dir": tmp})
datasource = DataSourceTestSubclassNet(
self.sys_cfg,
self.distro,
- Paths({"run_dir": tmp}),
+ paths,
get_data_retval=False,
)
self.assertFalse(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ json_file = paths.get_runpath("instance_data")
self.assertFalse(
- os.path.exists(json_file), "Found unexpected file %s" % json_file
+ os.path.exists(json_file), f"Found unexpected file {json_file}"
)
def test_get_data_writes_json_instance_data_on_success(self):
@@ -395,7 +394,7 @@ class TestDataSource(CiTestCase):
return_value="canonical_cloud_id",
):
datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ json_file = Paths({"run_dir": tmp}).get_runpath("instance_data")
content = util.load_file(json_file)
expected = {
"base64_encoded_keys": [],
@@ -485,7 +484,7 @@ class TestDataSource(CiTestCase):
}
with mock.patch("cloudinit.util.system_info", return_value=sys_info):
datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ json_file = Paths({"run_dir": tmp}).get_runpath("instance_data")
redacted = util.load_json(util.load_file(json_file))
expected = {
"base64_encoded_keys": [],
@@ -584,7 +583,9 @@ class TestDataSource(CiTestCase):
return_value="canonical-cloud-id",
):
datasource.get_data()
- sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ sensitive_json_file = Paths({"run_dir": tmp}).get_runpath(
+ "instance_data_sensitive"
+ )
content = util.load_file(sensitive_json_file)
expected = {
"base64_encoded_keys": [],
@@ -649,14 +650,15 @@ class TestDataSource(CiTestCase):
def test_get_data_handles_redacted_unserializable_content(self):
"""get_data warns unserializable content in INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
+ paths = Paths({"run_dir": tmp})
datasource = DataSourceTestSubclassNet(
self.sys_cfg,
self.distro,
- Paths({"run_dir": tmp}),
+ paths,
custom_metadata={"key1": "val1", "key2": {"key2.1": self.paths}},
)
datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ json_file = paths.get_runpath("instance_data")
content = util.load_file(json_file)
expected_metadata = {
"key1": "val1",
@@ -675,14 +677,15 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
cloud_dir = os.path.join(tmp, "cloud")
util.ensure_dir(cloud_dir)
+ paths = Paths({"run_dir": tmp, "cloud_dir": cloud_dir})
datasource = DataSourceTestSubclassNet(
self.sys_cfg,
self.distro,
- Paths({"run_dir": tmp, "cloud_dir": cloud_dir}),
+ paths,
)
datasource.ec2_metadata = UNSET
datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ json_file = paths.get_runpath("instance_data")
instance_data = util.load_json(util.load_file(json_file))
self.assertNotIn("ec2_metadata", instance_data["ds"])
datasource.ec2_metadata = {"ec2stuff": "is good"}
@@ -733,13 +736,14 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
cloud_dir = os.path.join(tmp, "cloud")
util.ensure_dir(cloud_dir)
+ paths = Paths({"run_dir": tmp, "cloud_dir": cloud_dir})
datasource = DataSourceTestSubclassNet(
self.sys_cfg,
self.distro,
- Paths({"run_dir": tmp, "cloud_dir": cloud_dir}),
+ paths,
)
datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ json_file = paths.get_runpath("instance_data")
instance_data = util.load_json(util.load_file(json_file))
self.assertNotIn("network_json", instance_data["ds"])
datasource.network_json = {"network_json": "is good"}
@@ -780,14 +784,15 @@ class TestDataSource(CiTestCase):
def test_get_data_base64encodes_unserializable_bytes(self):
"""On py3, get_data base64encodes any unserializable content."""
tmp = self.tmp_dir()
+ paths = Paths({"run_dir": tmp})
datasource = DataSourceTestSubclassNet(
self.sys_cfg,
self.distro,
- Paths({"run_dir": tmp}),
+ paths,
custom_metadata={"key1": "val1", "key2": {"key2.1": b"\x123"}},
)
self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ json_file = paths.get_runpath("instance_data")
content = util.load_file(json_file)
instance_json = util.load_json(content)
self.assertCountEqual(
diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py
index e60bb71f..b02ed177 100644
--- a/tests/unittests/sources/test_lxd.py
+++ b/tests/unittests/sources/test_lxd.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import json
import re
import stat
@@ -13,6 +14,7 @@ import yaml
from cloudinit.sources import UNSET
from cloudinit.sources import DataSourceLXD as lxd
from cloudinit.sources import InvalidMetaDataException
+from cloudinit.sources.DataSourceLXD import MetaDataKeys
DS_PATH = "cloudinit.sources.DataSourceLXD."
@@ -61,6 +63,29 @@ LXD_V1_METADATA_NO_NETWORK_CONFIG = {
},
}
+DEVICES = {
+ "devices": {
+ "some-disk": {
+ "path": "/path/in/container",
+ "source": "/path/on/host",
+ "type": "disk",
+ },
+ "enp1s0": {
+ "ipv4.address": "10.20.30.40",
+ "name": "eth0",
+ "network": "lxdbr0",
+ "type": "nic",
+ },
+ "root": {"path": "/", "pool": "default", "type": "disk"},
+ "enp1s1": {
+ "ipv4.address": "10.20.30.50",
+ "name": "eth1",
+ "network": "lxdbr0",
+ "type": "nic",
+ },
+ }
+}
+
def lxd_metadata():
return LXD_V1_METADATA
@@ -128,8 +153,10 @@ class TestGenerateFallbackNetworkConfig:
@mock.patch(DS_PATH + "util.system_info")
@mock.patch(DS_PATH + "subp.subp")
@mock.patch(DS_PATH + "subp.which")
+ @mock.patch(DS_PATH + "find_fallback_nic")
def test_net_v2_based_on_network_mode_virt_type_and_uname_machine(
self,
+ m_fallback,
m_which,
m_subp,
m_system_info,
@@ -138,11 +165,12 @@ class TestGenerateFallbackNetworkConfig:
expected,
):
"""Return network config v2 based on uname -m, systemd-detect-virt."""
+ m_fallback.return_value = None
if systemd_detect_virt is None:
m_which.return_value = None
m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]}
m_subp.return_value = (systemd_detect_virt, "")
- assert expected == lxd.generate_fallback_network_config()
+ assert expected == lxd.generate_network_config()
if systemd_detect_virt is None:
assert 0 == m_subp.call_count
assert 0 == m_system_info.call_count
@@ -156,6 +184,122 @@ class TestGenerateFallbackNetworkConfig:
assert 1 == m_system_info.call_count
+class TestNetworkConfig:
+ @pytest.fixture(autouse=True)
+ def mocks(self, mocker):
+ mocker.patch(f"{DS_PATH}subp.subp", return_value=("whatever", ""))
+
+ def test_provided_network_config(self, lxd_ds, mocker):
+ def _get_data(self):
+ self._crawled_metadata = copy.deepcopy(DEVICES)
+ self._crawled_metadata["network-config"] = "hi"
+
+ mocker.patch.object(
+ lxd.DataSourceLXD,
+ "_get_data",
+ autospec=True,
+ side_effect=_get_data,
+ )
+ assert lxd_ds.network_config == "hi"
+
+ @pytest.mark.parametrize(
+ "devices_to_remove,expected_config",
+ [
+ pytest.param(
+ # When two nics are presented with no passed network-config,
+ # Never configure more than one device.
+ # Always choose lowest sorted device over higher
+ # Always configure with DHCP
+ [],
+ {
+ "version": 1,
+ "config": [
+ {
+ "name": "eth0",
+ "subnets": [{"control": "auto", "type": "dhcp"}],
+ "type": "physical",
+ }
+ ],
+ },
+ id="multi-device",
+ ),
+ pytest.param(
+ # When one device is presented, use it
+ ["enp1s0"],
+ {
+ "version": 1,
+ "config": [
+ {
+ "name": "eth0",
+ "subnets": [{"control": "auto", "type": "dhcp"}],
+ "type": "physical",
+ }
+ ],
+ },
+ id="no-eth0",
+ ),
+ pytest.param(
+ # When one device is presented, use it
+ ["enp1s1"],
+ {
+ "version": 1,
+ "config": [
+ {
+ "name": "eth0",
+ "subnets": [{"control": "auto", "type": "dhcp"}],
+ "type": "physical",
+ }
+ ],
+ },
+ id="no-eth1",
+ ),
+ pytest.param(
+ # When no devices are presented, generate fallback
+ ["enp1s0", "enp1s1"],
+ {
+ "version": 1,
+ "config": [
+ {
+ "name": "eth0",
+ "subnets": [{"control": "auto", "type": "dhcp"}],
+ "type": "physical",
+ }
+ ],
+ },
+ id="device-list-empty",
+ ),
+ ],
+ )
+ def test_provided_devices(
+ self, devices_to_remove, expected_config, lxd_ds, mocker
+ ):
+        # TODO: The original point of these tests was to ensure that, when
+        # presented with nics by the LXD devices endpoint, we set up the
+        # correct device accordingly. Once LXD provides MAC addresses for
+        # these devices, we can restore that functionality, but for now
+        # these tests ensure that, regardless of the number of devices
+        # present, we generate the proper fallback config.
+ m_fallback = mocker.patch(
+ "cloudinit.sources.DataSourceLXD.find_fallback_nic",
+ return_value=None,
+ )
+ devices = copy.deepcopy(DEVICES)
+ for name in devices_to_remove:
+ del devices["devices"][name]
+
+ def _get_data(self):
+ self._crawled_metadata = devices
+
+ mocker.patch.object(
+ lxd.DataSourceLXD,
+ "_get_data",
+ autospec=True,
+ side_effect=_get_data,
+ )
+ assert lxd_ds.network_config == expected_config
+ assert m_fallback.call_count == 1
+
+
class TestDataSourceLXD:
def test_platform_info(self, lxd_ds):
assert "LXD" == lxd_ds.dsname
@@ -195,9 +339,7 @@ class TestDataSourceLXD:
"""network_config is correctly computed when _network_config is unset
and _crawled_metadata does not contain network_config.
"""
- lxd.generate_fallback_network_config = mock.Mock(
- return_value=NETWORK_V1
- )
+ lxd.generate_network_config = mock.Mock(return_value=NETWORK_V1)
assert UNSET == lxd_ds_no_network_config._crawled_metadata
assert UNSET == lxd_ds_no_network_config._network_config
assert None is lxd_ds_no_network_config.userdata_raw
@@ -207,7 +349,7 @@ class TestDataSourceLXD:
LXD_V1_METADATA_NO_NETWORK_CONFIG
== lxd_ds_no_network_config._crawled_metadata
)
- assert 1 == lxd.generate_fallback_network_config.call_count
+ assert 1 == lxd.generate_network_config.call_count
class TestIsPlatformViable:
@@ -237,15 +379,16 @@ class TestIsPlatformViable:
class TestReadMetadata:
@pytest.mark.parametrize(
- "url_responses,expected,logs",
+ "get_devices,url_responses,expected,logs",
(
( # Assert non-JSON format from config route
+ False,
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
"http://lxd/1.0/config": "[NOT_JSON",
},
InvalidMetaDataException(
- "Unable to determine cloud-init config from"
+ "Unable to process LXD config at"
" http://lxd/1.0/config. Expected JSON but found:"
" [NOT_JSON"
),
@@ -255,6 +398,7 @@ class TestReadMetadata:
],
),
( # Assert success on just meta-data
+ False,
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
"http://lxd/1.0/config": "[]",
@@ -269,7 +413,65 @@ class TestReadMetadata:
"[GET] [HTTP:200] http://lxd/1.0/config",
],
),
+ ( # Assert success on devices
+ True,
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[]",
+ "http://lxd/1.0/devices": (
+ '{"root": {"path": "/", "pool": "default",'
+ ' "type": "disk"}}'
+ ),
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {},
+ "meta-data": "local-hostname: md\n",
+ "devices": {
+ "root": {
+ "path": "/",
+ "pool": "default",
+ "type": "disk",
+ }
+ },
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
+ ( # Assert 404 on devices
+ True,
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[]",
+ },
+ InvalidMetaDataException(
+ "Invalid HTTP response [404] from http://lxd/1.0/devices"
+ ),
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
+ ( # Assert non-JSON format from devices
+ True,
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[]",
+ "http://lxd/1.0/devices": '{"root"',
+ },
+ InvalidMetaDataException(
+ "Unable to process LXD config at"
+ ' http://lxd/1.0/devices. Expected JSON but found: {"root"'
+ ),
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
( # Assert 404s for config routes log skipping
+ False,
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
"http://lxd/1.0/config": (
@@ -308,6 +510,7 @@ class TestReadMetadata:
],
),
( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys
+ False,
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
"http://lxd/1.0/config": (
@@ -348,7 +551,8 @@ class TestReadMetadata:
"[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data",
],
),
- ( # Assert cloud-init.* config key values prefered over user.*
+ ( # Assert cloud-init.* config key values preferred over user.*
+ False,
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
"http://lxd/1.0/config": (
@@ -425,7 +629,7 @@ class TestReadMetadata:
)
@mock.patch.object(lxd.requests.Session, "get")
def test_read_metadata_handles_unexpected_content_or_http_status(
- self, session_get, url_responses, expected, logs, caplog
+ self, m_session_get, get_devices, url_responses, expected, logs, caplog
):
"""read_metadata handles valid and invalid content and status codes."""
@@ -446,16 +650,48 @@ class TestReadMetadata:
type(m_resp).text = mock_text
return m_resp
- session_get.side_effect = fake_get
-
+ m_session_get.side_effect = fake_get
+ metadata_keys = MetaDataKeys.META_DATA | MetaDataKeys.CONFIG
+ if get_devices:
+ metadata_keys |= MetaDataKeys.DEVICES
if isinstance(expected, Exception):
with pytest.raises(type(expected), match=re.escape(str(expected))):
- lxd.read_metadata()
+ lxd.read_metadata(metadata_keys=metadata_keys)
else:
- assert expected == lxd.read_metadata()
- caplogs = caplog.text
+ assert expected == lxd.read_metadata(metadata_keys=metadata_keys)
for log in logs:
- assert log in caplogs
+ assert log in caplog.text
+
+ @pytest.mark.parametrize(
+ "metadata_keys, expected_get_urls",
+ [
+ (MetaDataKeys.NONE, []),
+ (MetaDataKeys.META_DATA, ["http://lxd/1.0/meta-data"]),
+ (MetaDataKeys.CONFIG, ["http://lxd/1.0/config"]),
+ (MetaDataKeys.DEVICES, ["http://lxd/1.0/devices"]),
+ (
+ MetaDataKeys.DEVICES | MetaDataKeys.CONFIG,
+ ["http://lxd/1.0/config", "http://lxd/1.0/devices"],
+ ),
+ (
+ MetaDataKeys.ALL,
+ [
+ "http://lxd/1.0/meta-data",
+ "http://lxd/1.0/config",
+ "http://lxd/1.0/devices",
+ ],
+ ),
+ ],
+ )
+ @mock.patch.object(lxd.requests.Session, "get")
+ def test_read_metadata_keys(
+ self, m_session_get, metadata_keys, expected_get_urls
+ ):
+ lxd.read_metadata(metadata_keys=metadata_keys)
+ assert (
+ list(map(mock.call, expected_get_urls))
+ == m_session_get.call_args_list
+ )
# vi: ts=4 expandtab
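
The metadata_keys parametrization above composes MetaDataKeys members with
bitwise OR, the behavior of a standard enum.Flag. A minimal sketch of that
pattern; the member values below are illustrative, since only the names
appear in this patch:

    import enum

    class MetaDataKeysSketch(enum.Flag):
        NONE = 0
        META_DATA = 1
        CONFIG = 2
        DEVICES = 4
        ALL = META_DATA | CONFIG | DEVICES

    wanted = MetaDataKeysSketch.META_DATA | MetaDataKeysSketch.CONFIG
    assert MetaDataKeysSketch.CONFIG in wanted
    assert MetaDataKeysSketch.DEVICES not in wanted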
diff --git a/tests/unittests/sources/test_nwcs.py b/tests/unittests/sources/test_nwcs.py
new file mode 100644
index 00000000..395f99f8
--- /dev/null
+++ b/tests/unittests/sources/test_nwcs.py
@@ -0,0 +1,116 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import helpers, settings, util
+from cloudinit.sources import DataSourceNWCS
+from tests.unittests.helpers import CiTestCase, mock
+
+METADATA = util.load_yaml(
+ """
+instance-id: test
+machine_type: b1.centi
+hostname: debian
+network:
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ mac_address: 96:00:00:08:19:da
+ subnets:
+ - type: dhcp
+public-keys:
+- ssh-rsa \
+ AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh
+userdata: "test"
+vendordata: "test"
+"""
+)
+
+
+class TestDataSourceNWCS(CiTestCase):
+ """
+ Test reading the metadata
+ """
+
+ def setUp(self):
+ super(TestDataSourceNWCS, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self):
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
+ ds = DataSourceNWCS.DataSourceNWCS(
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
+ )
+ return ds
+
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.sources.DataSourceNWCS.EphemeralDHCPv4")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.DataSourceNWCS.read_metadata")
+ @mock.patch("cloudinit.sources.DataSourceNWCS.get_nwcs_data")
+ def test_read_data(
+ self,
+ m_get_nwcs_data,
+ m_readmd,
+ m_fallback_nic,
+ m_net,
+ m_dhcp,
+ ):
+ m_get_nwcs_data.return_value = True
+ m_readmd.return_value = METADATA.copy()
+ m_fallback_nic.return_value = "eth0"
+ m_dhcp.return_value = [
+ {
+ "interface": "eth0",
+ "fixed-address": "192.168.0.2",
+ "routers": "192.168.0.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.0.255",
+ }
+ ]
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ m_net.assert_called_once_with(
+ iface="eth0",
+ connectivity_url_data={
+ "url": "http://169.254.169.254/api/v1/metadata/instance-id"
+ },
+ )
+
+ self.assertTrue(m_readmd.called)
+
+ self.assertEqual(METADATA.get("hostname"), ds.get_hostname().hostname)
+
+ self.assertEqual(METADATA.get("public-keys"), ds.get_public_ssh_keys())
+
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+ self.assertEqual(ds.get_userdata_raw(), METADATA.get("userdata"))
+ self.assertEqual(ds.get_vendordata_raw(), METADATA.get("vendordata"))
+
+ @mock.patch("cloudinit.sources.DataSourceNWCS.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.DataSourceNWCS.get_nwcs_data")
+ def test_not_on_nwcs_returns_false(
+ self, m_get_nwcs_data, m_find_fallback, m_read_md
+ ):
+ """If helper 'get_nwcs_data' returns False,
+ return False from get_data."""
+ m_get_nwcs_data.return_value = False
+ ds = self.get_ds()
+ ret = ds.get_data()
+
+ self.assertFalse(ret)
+        # These are white-box checks to ensure it did not search.
+ m_find_fallback.assert_not_called()
+ m_read_md.assert_not_called()
+
+ @mock.patch("cloudinit.sources.DataSourceNWCS.get_interface_name")
+ def test_get_interface_name(self, m_ifname):
+ m_ifname.return_value = "eth0"
+
+ self.assertEqual(
+ m_ifname.return_value, METADATA["network"]["config"][0]["name"]
+ )
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
index f65aab8b..8bcecae7 100644
--- a/tests/unittests/sources/test_openstack.py
+++ b/tests/unittests/sources/test_openstack.py
@@ -10,7 +10,7 @@ import re
from io import StringIO
from urllib.parse import urlparse
-import httpretty as hp
+import responses
from cloudinit import helpers, settings, util
from cloudinit.sources import UNSET, BrokenMetadata
@@ -18,6 +18,7 @@ from cloudinit.sources import DataSourceOpenStack as ds
from cloudinit.sources import convert_vendordata
from cloudinit.sources.helpers import openstack
from tests.unittests import helpers as test_helpers
+from tests.unittests.helpers import mock
BASE_URL = "http://169.254.169.254"
PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
@@ -76,8 +77,8 @@ MOCK_PATH = "cloudinit.sources.DataSourceOpenStack."
# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
-def _register_uris(version, ec2_files, ec2_meta, os_files):
- """Registers a set of url patterns into httpretty that will mimic the
+def _register_uris(version, ec2_files, ec2_meta, os_files, *, responses_mock):
+ """Registers a set of url patterns into responses that will mimic the
same data returned by the openstack metadata service (and ec2 service)."""
def match_ec2_url(uri, headers):
@@ -118,17 +119,17 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):
return (200, headers, os_files.get(path))
return (404, headers, "")
- def get_request_callback(method, uri, headers):
- uri = urlparse(uri)
+ def get_request_callback(request):
+ uri = urlparse(request.url)
path = uri.path.lstrip("/").split("/")
if path[0] == "openstack":
- return match_os_uri(uri, headers)
- return match_ec2_url(uri, headers)
+ return match_os_uri(uri, request.headers)
+ return match_ec2_url(uri, request.headers)
- hp.register_uri(
- hp.GET,
+ responses_mock.add_callback(
+ responses.GET,
re.compile(r"http://169.254.169.254/.*"),
- body=get_request_callback,
+ callback=get_request_callback,
)
@@ -136,7 +137,7 @@ def _read_metadata_service():
return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
-class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
+class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
with_logs = True
VERSION = "latest"
@@ -146,7 +147,13 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.tmp = self.tmp_dir()
def test_successful(self):
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ _register_uris(
+ self.VERSION,
+ EC2_FILES,
+ EC2_META,
+ OS_FILES,
+ responses_mock=self.responses,
+ )
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get("vendordata"))
self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
@@ -171,7 +178,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
)
def test_no_ec2(self):
- _register_uris(self.VERSION, {}, {}, OS_FILES)
+ _register_uris(
+ self.VERSION, {}, {}, OS_FILES, responses_mock=self.responses
+ )
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get("vendordata"))
self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
@@ -186,7 +195,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("meta_data.json"):
os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
self.assertRaises(openstack.NonReadable, _read_metadata_service)
def test_bad_uuid(self):
@@ -196,7 +207,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("meta_data.json"):
os_files[k] = json.dumps(os_meta)
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
self.assertRaises(BrokenMetadata, _read_metadata_service)
def test_userdata_empty(self):
@@ -204,7 +217,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("user_data"):
os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get("vendordata"))
self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
@@ -217,7 +232,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("vendor_data.json"):
os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
f = _read_metadata_service()
self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
@@ -228,7 +245,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("vendor_data2.json"):
os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
f = _read_metadata_service()
self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
@@ -239,7 +258,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("vendor_data.json"):
os_files[k] = "{" # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
self.assertRaises(BrokenMetadata, _read_metadata_service)
def test_vendordata2_invalid(self):
@@ -247,7 +268,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("vendor_data2.json"):
os_files[k] = "{" # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
self.assertRaises(BrokenMetadata, _read_metadata_service)
def test_metadata_invalid(self):
@@ -255,12 +278,20 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("meta_data.json"):
os_files[k] = "{" # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
self.assertRaises(BrokenMetadata, _read_metadata_service)
@test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_datasource(self, m_dhcp):
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ _register_uris(
+ self.VERSION,
+ EC2_FILES,
+ EC2_META,
+ OS_FILES,
+ responses_mock=self.responses,
+ )
ds_os = ds.DataSourceOpenStack(
settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
)
@@ -283,16 +314,23 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertIsNone(ds_os.vendordata_raw)
m_dhcp.assert_not_called()
- @hp.activate
@test_helpers.mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network")
@test_helpers.mock.patch(
"cloudinit.net.ephemeral.maybe_perform_dhcp_discovery"
)
def test_local_datasource(self, m_dhcp, m_net):
"""OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ _register_uris(
+ self.VERSION,
+ EC2_FILES,
+ EC2_META,
+ OS_FILES,
+ responses_mock=self.responses,
+ )
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
ds_os_local = ds.DataSourceOpenStackLocal(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
ds_os_local._fallback_interface = "eth9" # Monkey patch for dhcp
m_dhcp.return_value = [
@@ -322,14 +360,16 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
self.assertIsNone(ds_os_local.vendordata_raw)
- m_dhcp.assert_called_with("eth9", None)
+ m_dhcp.assert_called_with("eth9", None, mock.ANY)
def test_bad_datasource_meta(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith("meta_data.json"):
os_files[k] = "{" # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
ds_os = ds.DataSourceOpenStack(
settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
)
@@ -351,7 +391,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("meta_data.json"):
os_files.pop(k)
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
ds_os = ds.DataSourceOpenStack(
settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
)
@@ -426,7 +468,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
for k in list(os_files.keys()):
if k.endswith("meta_data.json"):
os_files[k] = json.dumps(os_meta)
- _register_uris(self.VERSION, {}, {}, os_files)
+ _register_uris(
+ self.VERSION, {}, {}, os_files, responses_mock=self.responses
+ )
ds_os = ds.DataSourceOpenStack(
settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
)
@@ -442,10 +486,15 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertFalse(found)
self.assertIsNone(ds_os.version)
- @hp.activate
def test_wb__crawl_metadata_does_not_persist(self):
"""_crawl_metadata returns current metadata and does not cache."""
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ _register_uris(
+ self.VERSION,
+ EC2_FILES,
+ EC2_META,
+ OS_FILES,
+ responses_mock=self.responses,
+ )
ds_os = ds.DataSourceOpenStack(
settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
)
@@ -604,6 +653,26 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_huaweicloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting Huawei Cloud VM asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_asset_tag_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "c7.large.2" # No match
+ if dmi_key == "chassis-asset-tag":
+ return "HUAWEICLOUD"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_asset_tag_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on Huawei Cloud VM",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
def test_detect_openstack_oraclecloud_chassis_asset_tag(
self, m_dmi, m_is_x86
):
@@ -686,7 +755,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_proc_env.assert_called_with(1)
-class TestMetadataReader(test_helpers.HttprettyTestCase):
+class TestMetadataReader(test_helpers.ResponsesTestCase):
"""Test the MetadataReader."""
burl = "http://169.254.169.254/"
@@ -703,8 +772,11 @@ class TestMetadataReader(test_helpers.HttprettyTestCase):
def register(self, path, body=None, status=200):
content = body if not isinstance(body, str) else body.encode("utf-8")
- hp.register_uri(
- hp.GET, self.burl + "openstack" + path, status=status, body=content
+ self.responses.add(
+ responses.GET,
+ self.burl + "openstack" + path,
+ status=status,
+ body=content,
)
def register_versions(self, versions):
diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py
index 7cd55be0..22aba7e2 100644
--- a/tests/unittests/sources/test_oracle.py
+++ b/tests/unittests/sources/test_oracle.py
@@ -7,6 +7,7 @@ import logging
from unittest import mock
import pytest
+import responses
from cloudinit.sources import DataSourceOracle as oracle
from cloudinit.sources import NetworkConfigSource
@@ -274,58 +275,90 @@ class TestNetworkConfigFromOpcImds:
)
assert 1 == caplog.text.count(" not found; skipping")
- def test_secondary_nic(self, oracle_ds):
+ @pytest.mark.parametrize(
+ "set_primary",
+ [True, False],
+ )
+ def test_imds_nic_setup_v1(self, set_primary, oracle_ds):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
"version": 1,
"config": [{"primary": "nic"}],
}
- mac_addr, nic_name = MAC_ADDR, "ens3"
with mock.patch(
- DS_PATH + ".get_interfaces_by_mac",
- return_value={mac_addr: nic_name},
+ f"{DS_PATH}.get_interfaces_by_mac",
+ return_value={
+ "02:00:17:05:d1:db": "ens3",
+ "00:00:17:02:2b:b1": "ens4",
+ },
):
- oracle_ds._add_network_config_from_opc_imds(set_primary=False)
-
- # The input is mutated
- assert 2 == len(oracle_ds.network_config["config"])
-
- secondary_nic_cfg = oracle_ds.network_config["config"][1]
- assert nic_name == secondary_nic_cfg["name"]
- assert "physical" == secondary_nic_cfg["type"]
- assert mac_addr == secondary_nic_cfg["mac_address"]
- assert 9000 == secondary_nic_cfg["mtu"]
+ oracle_ds._add_network_config_from_opc_imds(
+ set_primary=set_primary
+ )
- assert 1 == len(secondary_nic_cfg["subnets"])
- subnet_cfg = secondary_nic_cfg["subnets"][0]
- # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- assert "10.0.0.231" == subnet_cfg["address"]
+ secondary_nic_index = 1
+ nic_cfg = oracle_ds.network_config["config"]
+ if set_primary:
+ primary_cfg = nic_cfg[1]
+ secondary_nic_index += 1
+
+ assert "ens3" == primary_cfg["name"]
+ assert "physical" == primary_cfg["type"]
+ assert "02:00:17:05:d1:db" == primary_cfg["mac_address"]
+ assert 9000 == primary_cfg["mtu"]
+ assert 1 == len(primary_cfg["subnets"])
+ assert "address" not in primary_cfg["subnets"][0]
+ assert "dhcp" == primary_cfg["subnets"][0]["type"]
+ secondary_cfg = nic_cfg[secondary_nic_index]
+ assert "ens4" == secondary_cfg["name"]
+ assert "physical" == secondary_cfg["type"]
+ assert "00:00:17:02:2b:b1" == secondary_cfg["mac_address"]
+ assert 9000 == secondary_cfg["mtu"]
+ assert 1 == len(secondary_cfg["subnets"])
+ assert "10.0.0.231/24" == secondary_cfg["subnets"][0]["address"]
+ assert "static" == secondary_cfg["subnets"][0]["type"]
- def test_secondary_nic_v2(self, oracle_ds):
+ @pytest.mark.parametrize(
+ "set_primary",
+ [True, False],
+ )
+ def test_secondary_nic_v2(self, set_primary, oracle_ds):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
oracle_ds._network_config = {
"version": 2,
"ethernets": {"primary": {"nic": {}}},
}
- mac_addr, nic_name = MAC_ADDR, "ens3"
with mock.patch(
- DS_PATH + ".get_interfaces_by_mac",
- return_value={mac_addr: nic_name},
+ f"{DS_PATH}.get_interfaces_by_mac",
+ return_value={
+ "02:00:17:05:d1:db": "ens3",
+ "00:00:17:02:2b:b1": "ens4",
+ },
):
- oracle_ds._add_network_config_from_opc_imds(set_primary=False)
+ oracle_ds._add_network_config_from_opc_imds(
+ set_primary=set_primary
+ )
- # The input is mutated
- assert 2 == len(oracle_ds.network_config["ethernets"])
+ nic_cfg = oracle_ds.network_config["ethernets"]
+ if set_primary:
+ assert "ens3" in nic_cfg
+ primary_cfg = nic_cfg["ens3"]
+
+ assert primary_cfg["dhcp4"] is True
+ assert primary_cfg["dhcp6"] is False
+ assert "02:00:17:05:d1:db" == primary_cfg["match"]["macaddress"]
+ assert 9000 == primary_cfg["mtu"]
+ assert "addresses" not in primary_cfg
- secondary_nic_cfg = oracle_ds.network_config["ethernets"]["ens3"]
- assert secondary_nic_cfg["dhcp4"] is False
- assert secondary_nic_cfg["dhcp6"] is False
- assert mac_addr == secondary_nic_cfg["match"]["macaddress"]
- assert 9000 == secondary_nic_cfg["mtu"]
+ assert "ens4" in nic_cfg
+ secondary_cfg = nic_cfg["ens4"]
+ assert secondary_cfg["dhcp4"] is False
+ assert secondary_cfg["dhcp6"] is False
+ assert "00:00:17:02:2b:b1" == secondary_cfg["match"]["macaddress"]
+ assert 9000 == secondary_cfg["mtu"]
- assert 1 == len(secondary_nic_cfg["addresses"])
- # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- assert "10.0.0.231" == secondary_nic_cfg["addresses"][0]
+ assert 1 == len(secondary_cfg["addresses"])
+ assert "10.0.0.231/24" == secondary_cfg["addresses"][0]
@pytest.mark.parametrize("error_add_network", [None, Exception])
@pytest.mark.parametrize(
@@ -615,41 +648,41 @@ class TestNetworkConfigFiltersNetFailover:
assert expected_cfg == netcfg
-def _mock_v2_urls(httpretty):
- def instance_callback(request, uri, response_headers):
- print(response_headers)
- assert request.headers.get("Authorization") == "Bearer Oracle"
- return [200, response_headers, OPC_V2_METADATA]
+def _mock_v2_urls(mocked_responses):
+ def instance_callback(response):
+ print(response.url)
+ assert response.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response.headers, OPC_V2_METADATA]
- def vnics_callback(request, uri, response_headers):
- assert request.headers.get("Authorization") == "Bearer Oracle"
- return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
+ def vnics_callback(response):
+ assert response.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response.headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
- httpretty.register_uri(
- httpretty.GET,
+ mocked_responses.add_callback(
+ responses.GET,
"http://169.254.169.254/opc/v2/instance/",
- body=instance_callback,
+ callback=instance_callback,
)
- httpretty.register_uri(
- httpretty.GET,
+ mocked_responses.add_callback(
+ responses.GET,
"http://169.254.169.254/opc/v2/vnics/",
- body=vnics_callback,
+ callback=vnics_callback,
)
-def _mock_no_v2_urls(httpretty):
- httpretty.register_uri(
- httpretty.GET,
+def _mock_no_v2_urls(mocked_responses):
+ mocked_responses.add(
+ responses.GET,
"http://169.254.169.254/opc/v2/instance/",
status=404,
)
- httpretty.register_uri(
- httpretty.GET,
+ mocked_responses.add(
+ responses.GET,
"http://169.254.169.254/opc/v1/instance/",
body=OPC_V1_METADATA,
)
- httpretty.register_uri(
- httpretty.GET,
+ mocked_responses.add(
+ responses.GET,
"http://169.254.169.254/opc/v1/vnics/",
body=OPC_BM_SECONDARY_VNIC_RESPONSE,
)
@@ -688,9 +721,9 @@ class TestReadOpcMetadata:
instance_data,
fetch_vnics,
vnics_data,
- httpretty,
+ mocked_responses,
):
- setup_urls(httpretty)
+ setup_urls(mocked_responses)
metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
assert version == metadata.version
@@ -716,23 +749,48 @@ class TestReadOpcMetadata:
v1_failure_count,
expected_body,
expectation,
- httpretty,
+ mocked_responses,
):
- v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
- v2_responses.append(httpretty.Response(OPC_V2_METADATA))
- v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
- v1_responses.append(httpretty.Response(OPC_V1_METADATA))
-
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v1/instance/",
- responses=v1_responses,
+ # Workaround https://github.com/getsentry/responses/pull/171
+ # This mocking can be unrolled when Bionic is EOL
+ url_v2_call_count = 0
+
+ def url_v2_callback(request):
+ nonlocal url_v2_call_count
+ url_v2_call_count += 1
+ if url_v2_call_count <= v2_failure_count:
+ return (
+ 404,
+ request.headers,
+ f"403 Client Error: Forbidden for url: {url_v2}",
+ )
+ return 200, request.headers, OPC_V2_METADATA
+
+ url_v2 = "http://169.254.169.254/opc/v2/instance/"
+ mocked_responses.add_callback(
+ responses.GET, url_v2, callback=url_v2_callback
)
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v2/instance/",
- responses=v2_responses,
+
+ # Workaround https://github.com/getsentry/responses/pull/171
+ # This mocking can be unrolled when Bionic is EOL
+ url_v1_call_count = 0
+
+ def url_v1_callback(request):
+ nonlocal url_v1_call_count
+ url_v1_call_count += 1
+ if url_v1_call_count <= v1_failure_count:
+ return (
+ 404,
+ request.headers,
+ f"403 Client Error: Forbidden for url: {url_v1}",
+ )
+ return 200, request.headers, OPC_V1_METADATA
+
+ url_v1 = "http://169.254.169.254/opc/v1/instance/"
+ mocked_responses.add_callback(
+ responses.GET, url_v1, callback=url_v1_callback
)
+
with expectation:
assert expected_body == oracle.read_opc_metadata().instance_data
@@ -929,6 +987,7 @@ class TestNonIscsiRoot_GetDataBehaviour:
"headers": {"Authorization": "Bearer Oracle"},
"url": "http://169.254.169.254/opc/v2/instance/",
},
+ tmp_dir=oracle_ds.distro.get_tmp_exec_path(),
)
] == m_EphemeralDHCPv4.call_args_list
@@ -971,6 +1030,7 @@ class TestNonIscsiRoot_GetDataBehaviour:
"headers": {"Authorization": "Bearer Oracle"},
"url": "http://169.254.169.254/opc/v2/instance/",
},
+ tmp_dir=oracle_ds.distro.get_tmp_exec_path(),
)
] == m_EphemeralDHCPv4.call_args_list
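The call-counting callbacks above exist only because responses 0.5.1 (the bionic version pinned in tox.ini below) cannot register an ordered sequence of responses for a single URL; that is what the linked PR #171 fixed. With a newer responses, the same retry behaviour can be expressed directly, as in this sketch:

    import responses

    @responses.activate
    def example_v2_retries_then_success():
        url_v2 = "http://169.254.169.254/opc/v2/instance/"
        # Newer `responses` consumes same-URL registrations in order, so
        # two 404s followed by a 200 replaces the nonlocal call counter.
        responses.add(responses.GET, url_v2, status=404)
        responses.add(responses.GET, url_v2, status=404)
        responses.add(responses.GET, url_v2, body="{}", status=200)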
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
index 64c785d6..f9b470cb 100644
--- a/tests/unittests/sources/test_scaleway.py
+++ b/tests/unittests/sources/test_scaleway.py
@@ -1,16 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
import json
+from urllib.parse import SplitResult, urlsplit
-import httpretty
import requests
+import responses
from cloudinit import helpers, settings, sources
from cloudinit.sources import DataSourceScaleway
-from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
+from tests.unittests.helpers import CiTestCase, ResponsesTestCase, mock
-class DataResponses(object):
+class DataResponses:
"""
Possible responses of the API endpoint
169.254.42.42/user_data/cloud-init and
@@ -20,26 +21,26 @@ class DataResponses(object):
FAKE_USER_DATA = '#!/bin/bash\necho "user-data"'
@staticmethod
- def rate_limited(method, uri, headers):
- return 429, headers, ""
+ def rate_limited(request):
+ return 429, request.headers, ""
@staticmethod
- def api_error(method, uri, headers):
- return 500, headers, ""
+ def api_error(request):
+ return 500, request.headers, ""
@classmethod
- def get_ok(cls, method, uri, headers):
- return 200, headers, cls.FAKE_USER_DATA
+ def get_ok(cls, request):
+ return 200, request.headers, cls.FAKE_USER_DATA
@staticmethod
- def empty(method, uri, headers):
+ def empty(request):
"""
No user data for this server.
"""
- return 404, headers, ""
+ return 404, request.headers, ""
-class MetadataResponses(object):
+class MetadataResponses:
"""
Possible responses of the metadata API.
"""
@@ -63,8 +64,8 @@ class MetadataResponses(object):
}
@classmethod
- def get_ok(cls, method, uri, headers):
- return 200, headers, json.dumps(cls.FAKE_METADATA)
+ def get_ok(cls, response):
+ return 200, response.headers, json.dumps(cls.FAKE_METADATA)
class TestOnScaleway(CiTestCase):
@@ -163,23 +164,38 @@ def get_source_address_adapter(*args, **kwargs):
to bind on ports below 1024.
This function removes the bind on a privileged address, since anyway the
- HTTP call is mocked by httpretty.
+ HTTP call is mocked by responses.
"""
kwargs.pop("source_address")
return requests.adapters.HTTPAdapter(*args, **kwargs)
-class TestDataSourceScaleway(HttprettyTestCase):
+def _fix_mocking_url(url: str) -> str:
+ # Workaround https://github.com/getsentry/responses/pull/166
+ # This function can be removed when Bionic is EOL
+ split_result = urlsplit(url)
+ return SplitResult(
+ scheme=split_result.scheme,
+ netloc=split_result.netloc,
+ path=split_result.path,
+ query="", # ignore
+ fragment=split_result.fragment,
+ ).geturl()
+
+
+class TestDataSourceScaleway(ResponsesTestCase):
def setUp(self):
tmp = self.tmp_dir()
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
self.datasource = DataSourceScaleway.DataSourceScaleway(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": tmp})
)
super(TestDataSourceScaleway, self).setUp()
- self.metadata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
- "metadata_url"
- ]
+ self.metadata_url = _fix_mocking_url(
+ DataSourceScaleway.BUILTIN_DS_CONFIG["metadata_url"]
+ )
self.userdata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
"userdata_url"
]
@@ -212,14 +228,14 @@ class TestDataSourceScaleway(HttprettyTestCase):
m_get_cmdline.return_value = "scaleway"
# Make user data API return a valid response
- httpretty.register_uri(
- httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ self.responses.add_callback(
+ responses.GET, self.metadata_url, callback=MetadataResponses.get_ok
)
- httpretty.register_uri(
- httpretty.GET, self.userdata_url, body=DataResponses.get_ok
+ self.responses.add_callback(
+ responses.GET, self.userdata_url, callback=DataResponses.get_ok
)
- httpretty.register_uri(
- httpretty.GET, self.vendordata_url, body=DataResponses.get_ok
+ self.responses.add_callback(
+ responses.GET, self.vendordata_url, callback=DataResponses.get_ok
)
self.datasource.get_data()
@@ -343,14 +359,15 @@ class TestDataSourceScaleway(HttprettyTestCase):
# Make user and vendor data APIs return HTTP/404, which means there is
# no user / vendor data for the server.
- httpretty.register_uri(
- httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+
+ self.responses.add_callback(
+ responses.GET, self.metadata_url, callback=MetadataResponses.get_ok
)
- httpretty.register_uri(
- httpretty.GET, self.userdata_url, body=DataResponses.empty
+ self.responses.add_callback(
+ responses.GET, self.userdata_url, callback=DataResponses.empty
)
- httpretty.register_uri(
- httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ self.responses.add_callback(
+ responses.GET, self.vendordata_url, callback=DataResponses.empty
)
self.datasource.get_data()
self.assertIsNone(self.datasource.get_userdata_raw())
@@ -371,21 +388,26 @@ class TestDataSourceScaleway(HttprettyTestCase):
"""
m_get_cmdline.return_value = "scaleway"
- httpretty.register_uri(
- httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ self.responses.add_callback(
+ responses.GET, self.metadata_url, callback=MetadataResponses.get_ok
)
- httpretty.register_uri(
- httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ self.responses.add_callback(
+ responses.GET, self.vendordata_url, callback=DataResponses.empty
)
- httpretty.register_uri(
- httpretty.GET,
- self.userdata_url,
- responses=[
- httpretty.Response(body=DataResponses.rate_limited),
- httpretty.Response(body=DataResponses.rate_limited),
- httpretty.Response(body=DataResponses.get_ok),
- ],
+ # Workaround https://github.com/getsentry/responses/pull/171
+ # This mocking can be unrolled when Bionic is EOL
+ call_count = 0
+
+ def _callback(request):
+ nonlocal call_count
+ call_count += 1
+ if call_count <= 2:
+ return DataResponses.rate_limited(request)
+ return DataResponses.get_ok(request)
+
+ self.responses.add_callback(
+ responses.GET, self.userdata_url, callback=_callback
)
self.datasource.get_data()
self.assertEqual(
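The _fix_mocking_url() helper above strips the query component because responses 0.5.1 fails to match URLs that carry one (the linked PR #166). A usage sketch, with a hypothetical query-string URL for illustration:

    from urllib.parse import SplitResult, urlsplit

    def strip_query(url: str) -> str:
        # Same technique as _fix_mocking_url: rebuild the URL minus query.
        s = urlsplit(url)
        return SplitResult(s.scheme, s.netloc, s.path, "", s.fragment).geturl()

    assert (strip_query("http://169.254.42.42/conf?format=json")
            == "http://169.254.42.42/conf")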
diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py
index 702a67f7..9640d9c7 100644
--- a/tests/unittests/sources/test_smartos.py
+++ b/tests/unittests/sources/test_smartos.py
@@ -350,7 +350,7 @@ SUCCESS_LEN = len("0123abcd SUCCESS ")
NOTFOUND_LEN = len("0123abcd NOTFOUND")
-class PsuedoJoyentClient(object):
+class PsuedoJoyentClient:
def __init__(self, data=None):
if data is None:
data = MOCK_RETURNS.copy()
@@ -736,7 +736,7 @@ class TestIdentifyFile(CiTestCase):
)
-class ShortReader(object):
+class ShortReader:
"""Implements a 'read' interface for bytes provided.
much like io.BytesIO but the 'endbyte' acts as if EOF.
When it is reached a short will be returned."""
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
index 317cb638..9cbd33d9 100644
--- a/tests/unittests/sources/test_upcloud.py
+++ b/tests/unittests/sources/test_upcloud.py
@@ -207,8 +207,10 @@ class TestUpCloudNetworkSetup(CiTestCase):
self.tmp = self.tmp_dir()
def get_ds(self, get_sysinfo=_mock_dmi):
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
ds = DataSourceUpCloudLocal(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
if get_sysinfo:
ds._get_sysinfo = get_sysinfo
@@ -240,7 +242,7 @@ class TestUpCloudNetworkSetup(CiTestCase):
self.assertTrue(ret)
self.assertTrue(m_dhcp.called)
- m_dhcp.assert_called_with("eth1", None)
+ m_dhcp.assert_called_with("eth1", None, mock.ANY)
m_net.assert_called_once_with(
broadcast="10.6.3.255",
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
index 5f2ccd4a..27481e8e 100644
--- a/tests/unittests/sources/test_vultr.py
+++ b/tests/unittests/sources/test_vultr.py
@@ -262,8 +262,10 @@ class TestDataSourceVultr(CiTestCase):
mock_isvultr.return_value = True
mock_netmap.return_value = INTERFACE_MAP
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
source = DataSourceVultr.DataSourceVultr(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
# Test for failure
@@ -323,7 +325,9 @@ class TestDataSourceVultr(CiTestCase):
self.assertEqual(expected, vultr.generate_network_config(interf))
# Override ephemeral for proper unit testing
- def ephemeral_init(self, iface="", connectivity_url_data=None):
+ def ephemeral_init(
+ self, iface="", connectivity_url_data=None, tmp_dir=None
+ ):
global FINAL_INTERFACE_USED
FINAL_INTERFACE_USED = iface
if iface == "eth0":
@@ -331,7 +335,9 @@ class TestDataSourceVultr(CiTestCase):
raise NoDHCPLeaseError("Generic for testing")
# Override ephemeral for proper unit testing
- def ephemeral_init_always(self, iface="", connectivity_url_data=None):
+ def ephemeral_init_always(
+ self, iface="", connectivity_url_data=None, tmp_dir=None
+ ):
global FINAL_INTERFACE_USED
FINAL_INTERFACE_USED = iface
@@ -369,8 +375,10 @@ class TestDataSourceVultr(CiTestCase):
mock_interface_list.return_value = FILTERED_INTERFACES
mock_check_route.return_value = True
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
source = DataSourceVultr.DataSourceVultr(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
try:
diff --git a/tests/unittests/test_apport.py b/tests/unittests/test_apport.py
index a2c866b9..1876c1be 100644
--- a/tests/unittests/test_apport.py
+++ b/tests/unittests/test_apport.py
@@ -1,3 +1,5 @@
+import pytest
+
from tests.unittests.helpers import mock
M_PATH = "cloudinit.apport."
@@ -19,5 +21,40 @@ class TestApport:
report = object()
apport.attach_user_data(report, ui)
assert [
- mock.call(report, user_data_file, "user_data.txt")
+ mock.call(report, user_data_file, "user_data.txt"),
] == m_hookutils.attach_file.call_args_list
+ assert [
+ mock.call(
+ report,
+ "/var/log/installer/autoinstall-user-data",
+ "AutoInstallUserData",
+ ),
+ mock.call(report, "/autoinstall.yaml", "AutoInstallYAML"),
+ mock.call(
+ report,
+ "/etc/cloud/cloud.cfg.d/99-installer.cfg",
+ "InstallerCloudCfg",
+ ),
+ ] == m_hookutils.attach_file_if_exists.call_args_list
+
+ @pytest.mark.parametrize(
+ "report,tags",
+ (
+ ({"Irrelevant": "."}, ""),
+ ({"UdiLog": "."}, "ubuntu-desktop-installer"),
+ ({"CurtinError": ".", "SubiquityLog": "."}, "curtin subiquity"),
+ (
+ {
+ "UdiLog": ".",
+ "JournalErrors": "...Breaking ordering cycle...",
+ },
+ "systemd-ordering ubuntu-desktop-installer",
+ ),
+ ),
+ )
+ def test_add_bug_tags_assigns_proper_tags(self, report, tags):
+ """Tags are assigned based on non-empty project report key values."""
+ from cloudinit import apport
+
+ apport.add_bug_tags(report)
+ assert report.get("Tags", "") == tags
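The parametrized cases pin down how apport.add_bug_tags maps report keys to tags. Below is a sketch that satisfies exactly these four cases, assuming tags are sorted and space-joined; the real cloudinit.apport implementation may differ:

    def add_bug_tags(report):
        """Sketch consistent with the cases above, not the actual code."""
        tags = []
        if "Breaking ordering cycle" in report.get("JournalErrors", ""):
            tags.append("systemd-ordering")
        if report.get("UdiLog"):
            tags.append("ubuntu-desktop-installer")
        if report.get("CurtinError"):
            tags.append("curtin")
        if report.get("SubiquityLog"):
            tags.append("subiquity")
        if tags:
            report["Tags"] = " ".join(sorted(tags))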
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 4f9eeb65..ba2f96c5 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -13,6 +13,7 @@ from cloudinit import handlers, helpers, util
from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.handlers.cloud_config import CloudConfigPartHandler
from cloudinit.handlers.jinja_template import (
+ JinjaLoadError,
JinjaTemplatePartHandler,
convert_jinja_instance_data,
render_jinja_payload,
@@ -160,7 +161,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
"""If instance-data is absent, raise an error from handle_part."""
script_handler = ShellScriptPartHandler(self.paths)
h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
- with self.assertRaises(RuntimeError) as context_manager:
+ with self.assertRaises(JinjaLoadError) as context_manager:
h.handle_part(
data="data",
ctype="!" + handlers.CONTENT_START,
@@ -187,7 +188,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
util.write_file(instance_json, util.json_dumps({}))
h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
with mock.patch(self.mpath + "load_file") as m_load:
- with self.assertRaises(RuntimeError) as context_manager:
+ with self.assertRaises(JinjaLoadError) as context_manager:
m_load.side_effect = OSError(errno.EACCES, "Not allowed")
h.handle_part(
data="data",
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 04f5f457..dd85a1c7 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -247,9 +247,10 @@ class TestCLI:
[
"**Supported distros:** all",
"**Supported distros:** almalinux, alpine, centos, "
- "cloudlinux, debian, eurolinux, fedora, miraclelinux, "
- "openEuler, openmandriva, opensuse, photon, rhel, rocky, "
- "sles, ubuntu, virtuozzo",
+ "cloudlinux, cos, debian, eurolinux, fedora, freebsd, "
+ "mariner, miraclelinux, "
+ "openbsd, openEuler, openmandriva, "
+ "opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo",
"**Config schema**:\n **resize_rootfs:** "
"(``true``/``false``/``noblock``)",
"**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n",
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index eda04093..07a9e3fd 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -10,9 +10,12 @@ from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from io import BytesIO, StringIO
+from pathlib import Path
from unittest import mock
-import httpretty
+import pytest
+import requests
+import responses
from cloudinit import handlers
from cloudinit import helpers as c_helpers
@@ -24,6 +27,8 @@ from cloudinit.settings import PER_INSTANCE
from tests.unittests import helpers
from tests.unittests.util import FakeDataSource
+MPATH = "cloudinit.stages"
+
def count_messages(root):
am = 0
@@ -634,21 +639,21 @@ c: 4
self.assertEqual("quxC", cfg["foo"])
-class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
+class TestConsumeUserDataHttp(TestConsumeUserData, helpers.ResponsesTestCase):
def setUp(self):
TestConsumeUserData.setUp(self)
- helpers.HttprettyTestCase.setUp(self)
+ helpers.ResponsesTestCase.setUp(self)
def tearDown(self):
TestConsumeUserData.tearDown(self)
- helpers.HttprettyTestCase.tearDown(self)
+ helpers.ResponsesTestCase.tearDown(self)
@mock.patch("cloudinit.url_helper.time.sleep")
def test_include(self, mock_sleep):
"""Test #include."""
included_url = "http://hostname/path"
included_data = "#cloud-config\nincluded: true\n"
- httpretty.register_uri(httpretty.GET, included_url, included_data)
+ self.responses.add(responses.GET, included_url, included_data)
blob = "#include\n%s\n" % included_url
@@ -666,11 +671,11 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
"""Test #include with a bad URL."""
bad_url = "http://bad/forbidden"
bad_data = "#cloud-config\nbad: true\n"
- httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
+ self.responses.add(responses.GET, bad_url, bad_data, status=403)
included_url = "http://hostname/path"
included_data = "#cloud-config\nincluded: true\n"
- httpretty.register_uri(httpretty.GET, included_url, included_data)
+ self.responses.add(responses.GET, included_url, included_data)
blob = "#include\n%s\n%s" % (bad_url, included_url)
@@ -692,12 +697,18 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
def test_include_bad_url_no_fail(self, mock_sleep):
"""Test #include with a bad URL and failure disabled"""
bad_url = "http://bad/forbidden"
- bad_data = "#cloud-config\nbad: true\n"
- httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
+ self.responses.add(
+ responses.GET,
+ bad_url,
+ body=requests.HTTPError(
+ f"403 Client Error: Forbidden for url: {bad_url}"
+ ),
+ status=403,
+ )
included_url = "http://hostname/path"
included_data = "#cloud-config\nincluded: true\n"
- httpretty.register_uri(httpretty.GET, included_url, included_data)
+ self.responses.add(responses.GET, included_url, included_data)
blob = "#include\n%s\n%s" % (bad_url, included_url)
@@ -766,99 +777,102 @@ class TestConvertString(helpers.TestCase):
self.assertEqual("Just text", msg.get_payload(decode=False))
-class TestFetchBaseConfig(helpers.TestCase):
- def test_only_builtin_gets_builtin(self):
- ret = helpers.wrap_and_call(
- "cloudinit.stages",
- {
- "util.read_conf_with_confd": None,
- "util.read_conf_from_cmdline": None,
- "read_runtime_config": {"return_value": {}},
- },
- stages.fetch_base_config,
- )
- self.assertEqual(util.get_builtin_cfg(), ret)
+class TestFetchBaseConfig:
+ @pytest.fixture(autouse=True)
+ def mocks(self, mocker):
+ mocker.patch(f"{MPATH}.util.read_conf_from_cmdline")
+ mocker.patch(f"{MPATH}.read_runtime_config")
- def test_conf_d_overrides_defaults(self):
+ def test_only_builtin_gets_builtin(self, mocker):
+ mocker.patch(f"{MPATH}.read_runtime_config", return_value={})
+ mocker.patch(f"{MPATH}.util.read_conf_with_confd")
+ config = stages.fetch_base_config()
+ assert util.get_builtin_cfg() == config
+
+ def test_conf_d_overrides_defaults(self, mocker):
builtin = util.get_builtin_cfg()
test_key = sorted(builtin)[0]
test_value = "test"
- ret = helpers.wrap_and_call(
- "cloudinit.stages",
- {
- "util.read_conf_with_confd": {
- "return_value": {test_key: test_value}
- },
- "util.read_conf_from_cmdline": None,
- "read_runtime_config": {"return_value": {}},
- },
- stages.fetch_base_config,
+
+ mocker.patch(
+ f"{MPATH}.util.read_conf_with_confd",
+ return_value={test_key: test_value},
)
- self.assertEqual(ret.get(test_key), test_value)
+ mocker.patch(f"{MPATH}.read_runtime_config", return_value={})
+ config = stages.fetch_base_config()
+ assert config.get(test_key) == test_value
builtin[test_key] = test_value
- self.assertEqual(ret, builtin)
+ assert config == builtin
+
+ def test_confd_with_template(self, mocker, tmp_path: Path):
+ instance_data_path = tmp_path / "test_confd_with_template.json"
+ instance_data_path.write_text('{"template_var": "template_value"}')
+ cfg_path = tmp_path / "test_conf_with_template.cfg"
+ cfg_path.write_text('## template:jinja\n{"key": "{{template_var}}"}')
+
+ mocker.patch("cloudinit.stages.CLOUD_CONFIG", cfg_path)
+ mocker.patch(f"{MPATH}.util.get_builtin_cfg", return_value={})
+ config = stages.fetch_base_config(
+ instance_data_file=instance_data_path
+ )
+ assert config == {"key": "template_value"}
- def test_cmdline_overrides_defaults(self):
+ def test_cmdline_overrides_defaults(self, mocker):
builtin = util.get_builtin_cfg()
test_key = sorted(builtin)[0]
test_value = "test"
cmdline = {test_key: test_value}
- ret = helpers.wrap_and_call(
- "cloudinit.stages",
- {
- "util.read_conf_from_cmdline": {"return_value": cmdline},
- "util.read_conf_with_confd": None,
- "read_runtime_config": None,
- },
- stages.fetch_base_config,
+
+ mocker.patch(f"{MPATH}.util.read_conf_with_confd")
+ mocker.patch(
+ f"{MPATH}.util.read_conf_from_cmdline",
+ return_value=cmdline,
)
- self.assertEqual(ret.get(test_key), test_value)
+ mocker.patch(f"{MPATH}.read_runtime_config")
+ config = stages.fetch_base_config()
+ assert config.get(test_key) == test_value
builtin[test_key] = test_value
- self.assertEqual(ret, builtin)
+ assert config == builtin
- def test_cmdline_overrides_confd_runtime_and_defaults(self):
+ def test_cmdline_overrides_confd_runtime_and_defaults(self, mocker):
builtin = {"key1": "value0", "key3": "other2"}
conf_d = {"key1": "value1", "key2": "other1"}
cmdline = {"key3": "other3", "key2": "other2"}
runtime = {"key3": "runtime3"}
- ret = helpers.wrap_and_call(
- "cloudinit.stages",
- {
- "util.read_conf_with_confd": {"return_value": conf_d},
- "util.get_builtin_cfg": {"return_value": builtin},
- "read_runtime_config": {"return_value": runtime},
- "util.read_conf_from_cmdline": {"return_value": cmdline},
- },
- stages.fetch_base_config,
- )
- self.assertEqual(
- ret, {"key1": "value1", "key2": "other2", "key3": "other3"}
+
+ mocker.patch(f"{MPATH}.util.read_conf_with_confd", return_value=conf_d)
+ mocker.patch(f"{MPATH}.util.get_builtin_cfg", return_value=builtin)
+ mocker.patch(f"{MPATH}.read_runtime_config", return_value=runtime)
+ mocker.patch(
+ f"{MPATH}.util.read_conf_from_cmdline",
+ return_value=cmdline,
)
- def test_order_precedence_is_builtin_system_runtime_cmdline(self):
+ config = stages.fetch_base_config()
+ assert config == {"key1": "value1", "key2": "other2", "key3": "other3"}
+
+ def test_order_precedence_is_builtin_system_runtime_cmdline(self, mocker):
builtin = {"key1": "builtin0", "key3": "builtin3"}
conf_d = {"key1": "confd1", "key2": "confd2", "keyconfd1": "kconfd1"}
runtime = {"key1": "runtime1", "key2": "runtime2"}
cmdline = {"key1": "cmdline1"}
- ret = helpers.wrap_and_call(
- "cloudinit.stages",
- {
- "util.read_conf_with_confd": {"return_value": conf_d},
- "util.get_builtin_cfg": {"return_value": builtin},
- "util.read_conf_from_cmdline": {"return_value": cmdline},
- "read_runtime_config": {"return_value": runtime},
- },
- stages.fetch_base_config,
- )
- self.assertEqual(
- ret,
- {
- "key1": "cmdline1",
- "key2": "runtime2",
- "key3": "builtin3",
- "keyconfd1": "kconfd1",
- },
+
+ mocker.patch(f"{MPATH}.util.read_conf_with_confd", return_value=conf_d)
+ mocker.patch(f"{MPATH}.util.get_builtin_cfg", return_value=builtin)
+ mocker.patch(
+ f"{MPATH}.util.read_conf_from_cmdline",
+ return_value=cmdline,
)
+ mocker.patch(f"{MPATH}.read_runtime_config", return_value=runtime)
+
+ config = stages.fetch_base_config()
+
+ assert config == {
+ "key1": "cmdline1",
+ "key2": "runtime2",
+ "key3": "builtin3",
+ "keyconfd1": "kconfd1",
+ }
# vi: ts=4 expandtab
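The final test encodes the precedence order builtin < conf.d < runtime < cmdline. The same outcome can be reproduced with plain dict merging, which makes the expected values easy to verify by hand (illustration only; fetch_base_config uses cloud-init's own merger):

    builtin = {"key1": "builtin0", "key3": "builtin3"}
    conf_d = {"key1": "confd1", "key2": "confd2", "keyconfd1": "kconfd1"}
    runtime = {"key1": "runtime1", "key2": "runtime2"}
    cmdline = {"key1": "cmdline1"}

    # Later sources win, matching the asserted precedence.
    merged = {**builtin, **conf_d, **runtime, **cmdline}
    assert merged == {
        "key1": "cmdline1",
        "key2": "runtime2",
        "key3": "builtin3",
        "keyconfd1": "kconfd1",
    }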
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 11048750..f4b9403d 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -557,6 +557,10 @@ class TestDsIdentify(DsIdentifyBase):
"""SAP Converged Cloud identification"""
self._test_ds_found("OpenStack-SAPCCloud")
+ def test_openstack_huawei_cloud(self):
+ """Open Huawei Cloud identification."""
+ self._test_ds_found("OpenStack-HuaweiCloud")
+
def test_openstack_asset_tag_nova(self):
"""OpenStack identification via asset tag OpenStack Nova."""
self._test_ds_found("OpenStack-AssetTag-Nova")
@@ -753,6 +757,10 @@ class TestDsIdentify(DsIdentifyBase):
"""Hetzner cloud is identified in sys_vendor."""
self._test_ds_found("Hetzner")
+ def test_nwcs_found(self):
+ """NWCS is identified in sys_vendor."""
+ self._test_ds_found("NWCS")
+
def test_smartos_bhyve(self):
"""SmartOS cloud identified by SmartDC in dmi."""
self._test_ds_found("SmartOS-bhyve")
@@ -808,6 +816,18 @@ class TestDsIdentify(DsIdentifyBase):
"""EC2: bobrightbox.com in product_serial is not brightbox'"""
self._test_ds_not_found("Ec2-E24Cloud-negative")
+ def test_outscale_is_ec2(self):
+ """EC2: outscale identified by sys_vendor and product_name"""
+ self._test_ds_found("Ec2-Outscale")
+
+ def test_outscale_not_active_sysvendor(self):
+ """EC2: outscale in sys_vendor is not outscale'"""
+ self._test_ds_not_found("Ec2-Outscale-negative-sysvendor")
+
+ def test_outscale_not_active_productname(self):
+ """EC2: outscale in product_name is not outscale'"""
+ self._test_ds_not_found("Ec2-Outscale-negative-productname")
+
def test_vmware_no_valid_transports(self):
"""VMware: no valid transports"""
self._test_ds_not_found("VMware-NoValidTransports")
@@ -1212,6 +1232,12 @@ VALID_CFG = {
"files": {P_CHASSIS_ASSET_TAG: "SAP CCloud VM\n"},
"mocks": [MOCK_VIRT_IS_VMWARE],
},
+ "OpenStack-HuaweiCloud": {
+ # Huawei Cloud hosts use OpenStack
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "HUAWEICLOUD\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
+ },
"OpenStack-AssetTag-Nova": {
# VMware vSphere can't modify product-name, LP: #1669875
"ds": "OpenStack",
@@ -1425,6 +1451,21 @@ VALID_CFG = {
"ds": "Hetzner",
"mocks": [{"name": "dmi_decode", "ret": 0, "RET": "Hetzner"}],
},
+ "NWCS": {
+ "ds": "NWCS",
+ "files": {P_SYS_VENDOR: "NWCS\n"},
+ },
+ "NWCS-kenv": {
+ "ds": "NWCS",
+ "mocks": [
+ MOCK_UNAME_IS_FREEBSD,
+ {"name": "get_kenv_field", "ret": 0, "RET": "NWCS"},
+ ],
+ },
+ "NWCS-dmidecode": {
+ "ds": "NWCS",
+ "mocks": [{"name": "dmi_decode", "ret": 0, "RET": "NWCS"}],
+ },
"IBMCloud-metadata": {
"ds": "IBMCloud",
"mocks": [
@@ -1810,6 +1851,27 @@ VALID_CFG = {
MOCK_VIRT_IS_VMWARE,
],
},
+ "Ec2-Outscale": {
+ "ds": "Ec2",
+ "files": {
+ P_PRODUCT_NAME: "3DS Outscale VM\n",
+ P_SYS_VENDOR: "3DS Outscale\n",
+ },
+ },
+ "Ec2-Outscale-negative-sysvendor": {
+ "ds": "Ec2",
+ "files": {
+ P_PRODUCT_NAME: "3DS Outscale VM\n",
+ P_SYS_VENDOR: "Not 3DS Outscale\n",
+ },
+ },
+ "Ec2-Outscale-negative-productname": {
+ "ds": "Ec2",
+ "files": {
+ P_PRODUCT_NAME: "Not 3DS Outscale VM\n",
+ P_SYS_VENDOR: "3DS Outscale\n",
+ },
+ },
}
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 9f95d448..d3aeb1b7 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -17,6 +17,7 @@ DISTRO_VARIANTS = [
"fedora",
"freebsd",
"gentoo",
+ "mariner",
"netbsd",
"openbsd",
"photon",
@@ -69,6 +70,7 @@ class TestRenderCloudCfg:
"amazon": "ec2-user",
"debian": "ubuntu",
"rhel": "cloud-user",
+ "centos": "cloud-user",
"unknown": "ubuntu",
}
default_user = system_cfg["system_info"]["default_user"]["name"]
diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py
index bbdb3bd1..102c23fc 100644
--- a/tests/unittests/test_temp_utils.py
+++ b/tests/unittests/test_temp_utils.py
@@ -48,6 +48,7 @@ class TestTempUtils(CiTestCase):
"tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
"_TMPDIR": {"new": None},
"os.path.isdir": True,
+ "util.has_mount_opt": True,
},
mkdtemp,
needs_exe=True,
diff --git a/tests/unittests/test_upgrade.py b/tests/unittests/test_upgrade.py
index ed3c7efb..218915ed 100644
--- a/tests/unittests/test_upgrade.py
+++ b/tests/unittests/test_upgrade.py
@@ -1,7 +1,5 @@
# Copyright (C) 2020 Canonical Ltd.
#
-# Author: Daniel Watkins <oddbloke@ubuntu.com>
-#
# This file is part of cloud-init. See LICENSE file for license information.
"""Upgrade testing for cloud-init.
@@ -19,8 +17,15 @@ import pathlib
import pytest
from cloudinit.sources import pkl_load
+from cloudinit.sources.DataSourceAzure import DataSourceAzure
+from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud
from tests.unittests.helpers import resourceLocation
+DSNAME_TO_CLASS = {
+ "Azure": DataSourceAzure,
+ "NoCloud": DataSourceNoCloud,
+}
+
class TestUpgrade:
@pytest.fixture(
@@ -36,6 +41,25 @@ class TestUpgrade:
"""
return pkl_load(str(request.param))
+ def test_pkl_load_defines_all_init_side_effect_attributes(
+ self, previous_obj_pkl
+ ):
+ """Any attrs as side-effects of __init__ exist in unpickled obj."""
+ ds_class = DSNAME_TO_CLASS[previous_obj_pkl.dsname]
+ sys_cfg = previous_obj_pkl.sys_cfg
+ distro = previous_obj_pkl.distro
+ paths = previous_obj_pkl.paths
+ ds = ds_class(sys_cfg, distro, paths)
+ if ds.dsname == "NoCloud" and previous_obj_pkl.__dict__:
+ expected = (
+ set({"seed_dirs"}), # LP: #1568150 handled with getattr checks
+ set(),
+ )
+ else:
+ expected = (set(),)
+ missing_attrs = ds.__dict__.keys() - previous_obj_pkl.__dict__.keys()
+ assert missing_attrs in expected
+
def test_networking_set_on_distro(self, previous_obj_pkl):
"""We always expect to have ``.networking`` on ``Distro`` objects."""
assert previous_obj_pkl.distro.networking is not None
diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py
index 214e5727..289e67ac 100644
--- a/tests/unittests/test_url_helper.py
+++ b/tests/unittests/test_url_helper.py
@@ -5,7 +5,6 @@ from functools import partial
from threading import Event
from time import process_time
-import httpretty
import pytest
import requests
import responses
@@ -51,7 +50,7 @@ class TestOAuthHeaders(CiTestCase):
def test_oauth_headers_calls_oathlibclient_when_available(self, m_client):
"""oauth_headers calls oaut1.hClient.sign with the provided url."""
- class fakeclient(object):
+ class fakeclient:
def sign(self, url):
# The first and 3rd item of the client.sign tuple are ignored
return ("junk", url, "junk2")
@@ -82,56 +81,61 @@ class TestReadFileOrUrl(CiTestCase):
self.assertEqual(result.contents, data)
self.assertEqual(str(result), data.decode("utf-8"))
- @httpretty.activate
+ @responses.activate
def test_read_file_or_url_str_from_url(self):
"""Test that str(result.contents) on url is text version of contents.
It should not be "b'data'", but just "'data'" """
url = "http://hostname/path"
data = b"This is my url content\n"
- httpretty.register_uri(httpretty.GET, url, data)
+ responses.add(responses.GET, url, data)
result = read_file_or_url(url)
self.assertEqual(result.contents, data)
self.assertEqual(str(result), data.decode("utf-8"))
- @httpretty.activate
+ @responses.activate
def test_read_file_or_url_str_from_url_streamed(self):
"""Test that str(result.contents) on url is text version of contents.
It should not be "b'data'", but just "'data'" """
url = "http://hostname/path"
data = b"This is my url content\n"
- httpretty.register_uri(httpretty.GET, url, data)
+ responses.add(responses.GET, url, data)
result = read_file_or_url(url, stream=True)
assert isinstance(result, UrlResponse)
self.assertEqual(result.contents, data)
self.assertEqual(str(result), data.decode("utf-8"))
- @httpretty.activate
+ @responses.activate
def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self):
"""Headers are redacted from logs but unredacted in requests."""
url = "http://hostname/path"
headers = {"sensitive": "sekret", "server": "blah"}
- httpretty.register_uri(httpretty.GET, url)
- # By default, httpretty will log our request along with the header,
- # so if we don't change this the secret will show up in the logs
- logging.getLogger("httpretty.core").setLevel(logging.CRITICAL)
+
+ def _request_callback(request):
+ for k in headers.keys():
+ self.assertEqual(headers[k], request.headers[k])
+ return (200, request.headers, "does_not_matter")
+
+ responses.add_callback(responses.GET, url, callback=_request_callback)
read_file_or_url(url, headers=headers, headers_redact=["sensitive"])
logs = self.logs.getvalue()
- for k in headers.keys():
- self.assertEqual(headers[k], httpretty.last_request().headers[k])
self.assertIn(REDACTED, logs)
self.assertNotIn("sekret", logs)
- @httpretty.activate
+ @responses.activate
def test_read_file_or_url_str_from_url_redacts_noheaders(self):
"""When no headers_redact, header values are in logs and requests."""
url = "http://hostname/path"
headers = {"sensitive": "sekret", "server": "blah"}
- httpretty.register_uri(httpretty.GET, url)
+
+ def _request_callback(request):
+ for k in headers.keys():
+ self.assertEqual(headers[k], request.headers[k])
+ return (200, request.headers, "does_not_matter")
+
+ responses.add_callback(responses.GET, url, callback=_request_callback)
read_file_or_url(url, headers=headers)
- for k in headers.keys():
- self.assertEqual(headers[k], httpretty.last_request().headers[k])
logs = self.logs.getvalue()
self.assertNotIn(REDACTED, logs)
self.assertIn("sekret", logs)
@@ -514,7 +518,7 @@ class TestUrlHelper:
If this test proves flaky, increase wait time. Since it is async,
increasing wait time for the non-responding endpoint should not
increase total test time, assuming async_delay=0 is used and at least
- one non-waiting endpoint is registered with httpretty.
+ one non-waiting endpoint is registered with responses.
Subsequent tests will continue execution after the first response is
received.
"""
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 0b297ef1..0c2735ae 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -25,7 +25,7 @@ from cloudinit.helpers import Paths
from cloudinit.sources import DataSourceHostname
from cloudinit.subp import SubpResult
from tests.unittests import helpers
-from tests.unittests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase, skipUnlessJinja
LOG = logging.getLogger(__name__)
M_PATH = "cloudinit.util."
@@ -341,7 +341,34 @@ OS_RELEASE_OPENMANDRIVA = dedent(
"""
)
+OS_RELEASE_COS = dedent(
+ """\
+ NAME="Container-Optimized OS"
+ ID=cos
+ PRETTY_NAME="Container-Optimized OS from Google"
+ HOME_URL="https://cloud.google.com/container-optimized-os/docs"
+ BUG_REPORT_URL="https://cloud.google.com/container-optimized-os/docs/resources/support-policy#contact_us"
+ VERSION=93
+ VERSION_ID=93
+"""
+)
+OS_RELEASE_MARINER = dedent(
+ """\
+ NAME="CBL-Mariner"
+ VERSION="2.0.20221004"
+ ID=mariner
+ VERSION_ID=2.0
+ PRETTY_NAME="CBL-Mariner/Linux"
+ ANSI_COLOR="1;34"
+ HOME_URL="https://aka.ms/cbl-mariner"
+ BUG_REPORT_URL="https://aka.ms/cbl-mariner"
+ SUPPORT_URL="https://aka.ms/cbl-mariner"
+"""
+)
+
+
+@pytest.mark.usefixtures("fake_filesystem")
class TestUtil:
def test_parse_mount_info_no_opts_no_arg(self):
result = util.parse_mount_info("/home", MOUNT_INFO, LOG)
@@ -355,6 +382,22 @@ class TestUtil:
result = util.parse_mount_info("/", MOUNT_INFO, LOG, True)
assert ("/dev/sda1", "btrfs", "/", "ro,relatime") == result
+ @pytest.mark.parametrize(
+ "opt, expected_result",
+ [
+ ("rw", True),
+ ("relatime", True),
+ ("idmapped", True),
+ ("noexec", False),
+ ],
+ )
+ @mock.patch(
+ M_PATH + "get_mount_info",
+ return_value=("/dev/sda", "ext4", "/", "rw,relatime,idmapped"),
+ )
+ def test_has_mount_opt(self, m_get_mount_info, opt, expected_result):
+ assert expected_result == util.has_mount_opt("/", opt)
+
@mock.patch(M_PATH + "get_mount_info")
def test_mount_is_rw(self, m_mount_info):
m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "rw,relatime")
@@ -367,6 +410,59 @@ class TestUtil:
is_rw = util.mount_is_read_write("/")
assert is_rw is False
+ def test_read_conf(self, mocker):
+ mocker.patch("cloudinit.util.load_file", return_value='{"a": "b"}')
+ assert util.read_conf("any") == {"a": "b"}
+
+ @skipUnlessJinja()
+ def test_read_conf_with_template(self, mocker, caplog):
+ mocker.patch("os.path.exists", return_value=True)
+ mocker.patch(
+ "cloudinit.util.load_file",
+ return_value='## template: jinja\n{"a": "{{c}}"}',
+ )
+ mocker.patch(
+ "cloudinit.handlers.jinja_template.load_file",
+ return_value='{"c": "d"}',
+ )
+
+ conf = util.read_conf("cfg_path", instance_data_file="vars_path")
+ assert conf == {"a": "d"}
+ assert (
+ "Applied instance data in 'vars_path' to configuration loaded "
+ "from 'cfg_path'"
+ ) in caplog.text
+
+ @skipUnlessJinja()
+ def test_read_conf_with_failed_template(self, mocker, caplog):
+ mocker.patch("os.path.exists", return_value=True)
+ mocker.patch(
+ "cloudinit.util.load_file",
+ return_value='## template: jinja\n{"a": "{{c}}"', # missing }
+ )
+ mocker.patch(
+ "cloudinit.handlers.jinja_template.load_file",
+ return_value='{"c": "d"}',
+ )
+ conf = util.read_conf("cfg_path", instance_data_file="vars_path")
+ assert "Failed loading yaml blob" in caplog.text
+ assert conf == {}
+
+ @skipUnlessJinja()
+ def test_read_conf_with_failed_vars(self, mocker, caplog):
+ mocker.patch("os.path.exists", return_value=True)
+ mocker.patch(
+ "cloudinit.util.load_file",
+ return_value='## template: jinja\n{"a": "{{c}}"}',
+ )
+ mocker.patch(
+ "cloudinit.handlers.jinja_template.load_file",
+ return_value='{"c": "d"', # missing }
+ )
+ conf = util.read_conf("cfg_path", instance_data_file="vars_path")
+ assert "Could not apply Jinja template" in caplog.text
+ assert conf == {"a": "{{c}}"}
+
@mock.patch(
M_PATH + "read_conf",
side_effect=(OSError(errno.EACCES, "Not allowed"), {"0": "0"}),
@@ -442,7 +538,9 @@ class TestUtil:
assert not out
assert not err
if create_confd:
- assert [mock.call(confd_fn)] == m_read_confd.call_args_list
+ assert [
+ mock.call(confd_fn, instance_data_file=None)
+ ] == m_read_confd.call_args_list
assert [expected_call] == m_mergemanydict.call_args_list
@pytest.mark.parametrize("custom_cloud_dir", [True, False])
@@ -724,46 +822,60 @@ class TestBlkid(CiTestCase):
)
+@mock.patch("cloudinit.subp.which")
@mock.patch("cloudinit.subp.subp")
class TestUdevadmSettle(CiTestCase):
- def test_with_no_params(self, m_subp):
+ def test_with_no_params(self, m_which, m_subp):
"""called with no parameters."""
+ m_which.side_effect = lambda m: m in ("udevadm",)
util.udevadm_settle()
m_subp.called_once_with(mock.call(["udevadm", "settle"]))
- def test_with_exists_and_not_exists(self, m_subp):
+ def test_udevadm_not_present(self, m_which, m_subp):
+ """where udevadm program does not exist should not invoke subp."""
+ m_which.side_effect = lambda m: m in ("",)
+ util.udevadm_settle()
+ m_subp.called_once_with(["which", "udevadm"])
+
+ def test_with_exists_and_not_exists(self, m_which, m_subp):
"""with exists=file where file does not exist should invoke subp."""
+ m_which.side_effect = lambda m: m in ("udevadm",)
mydev = self.tmp_path("mydev")
util.udevadm_settle(exists=mydev)
m_subp.called_once_with(
["udevadm", "settle", "--exit-if-exists=%s" % mydev]
)
- def test_with_exists_and_file_exists(self, m_subp):
- """with exists=file where file does exist should not invoke subp."""
+ def test_with_exists_and_file_exists(self, m_which, m_subp):
+ """with exists=file where file does exist should only invoke subp
+ once for 'which' call."""
+ m_which.side_effect = lambda m: m in ("udevadm",)
mydev = self.tmp_path("mydev")
util.write_file(mydev, "foo\n")
util.udevadm_settle(exists=mydev)
- self.assertIsNone(m_subp.call_args)
+ m_subp.called_once_with(["which", "udevadm"])
- def test_with_timeout_int(self, m_subp):
+ def test_with_timeout_int(self, m_which, m_subp):
"""timeout can be an integer."""
+ m_which.side_effect = lambda m: m in ("udevadm",)
timeout = 9
util.udevadm_settle(timeout=timeout)
m_subp.called_once_with(
["udevadm", "settle", "--timeout=%s" % timeout]
)
- def test_with_timeout_string(self, m_subp):
+ def test_with_timeout_string(self, m_which, m_subp):
"""timeout can be a string."""
+ m_which.side_effect = lambda m: m in ("udevadm",)
timeout = "555"
util.udevadm_settle(timeout=timeout)
- m_subp.assert_called_once_with(
+ m_subp.called_once_with(
["udevadm", "settle", "--timeout=%s" % timeout]
)
- def test_with_exists_and_timeout(self, m_subp):
+ def test_with_exists_and_timeout(self, m_which, m_subp):
"""test call with both exists and timeout."""
+ m_which.side_effect = lambda m: m in ("udevadm",)
mydev = self.tmp_path("mydev")
timeout = "3"
util.udevadm_settle(exists=mydev)
@@ -776,7 +888,8 @@ class TestUdevadmSettle(CiTestCase):
]
)
- def test_subp_exception_raises_to_caller(self, m_subp):
+ def test_subp_exception_raises_to_caller(self, m_which, m_subp):
+ m_which.side_effect = lambda m: m in ("udevadm",)
m_subp.side_effect = subp.ProcessExecutionError("BOOM")
self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
@@ -1055,6 +1168,14 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("photon", "4.0", "VMware Photon OS/Linux"), dist)
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_mariner_os_release(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on MarinerOS"""
+ m_os_release.return_value = OS_RELEASE_MARINER
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("mariner", "2.0", ""), dist)
+
@mock.patch(M_PATH + "load_file")
def test_get_linux_openmandriva(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on OpenMandriva"""
@@ -1063,6 +1184,14 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("openmandriva", "4.90", "nickel"), dist)
+ @mock.patch(M_PATH + "load_file")
+ def test_get_linux_cos(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on COS"""
+ m_os_release.return_value = OS_RELEASE_COS
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("cos", "93", ""), dist)
+
@mock.patch("platform.system")
@mock.patch("platform.dist", create=True)
def test_get_linux_distro_no_data(
@@ -1113,6 +1242,7 @@ class TestGetVariant:
({"system": "linux", "dist": ("debian",)}, "debian"),
({"system": "linux", "dist": ("eurolinux",)}, "eurolinux"),
({"system": "linux", "dist": ("fedora",)}, "fedora"),
+ ({"system": "linux", "dist": ("mariner",)}, "mariner"),
({"system": "linux", "dist": ("openEuler",)}, "openeuler"),
({"system": "linux", "dist": ("photon",)}, "photon"),
({"system": "linux", "dist": ("rhel",)}, "rhel"),
@@ -1494,7 +1624,7 @@ class TestRedirectOutputPreexecFn:
assert 0 == m_setgid.call_count
-class FakeSelinux(object):
+class FakeSelinux:
def __init__(self, match_what):
self.match_what = match_what
self.restored = []
@@ -2691,6 +2821,20 @@ class TestVersion:
) or util.Version.from_str(v1) == util.Version.from_str(v2)
@pytest.mark.parametrize(
+ ("version"),
+ (
+ ("3.1.0"),
+ ("3.0.1"),
+ ("3.1"),
+ ("3.1.0.0"),
+ ("3.1.1"),
+ ),
+ )
+ def test_to_version_and_back_to_str(self, version):
+ """Verify __str__, __iter__, and Version.from_str()"""
+ assert version == str(util.Version.from_str(version))
+
+ @pytest.mark.parametrize(
("str_ver", "cls_ver"),
(
(
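The new has_mount_opt test mocks get_mount_info to return a four-tuple whose last element is the mount-option string. A sketch of an implementation consistent with those cases, assuming cloudinit.util.get_mount_info with get_mnt_opts=True (the real cloudinit.util version may differ):

    from cloudinit.util import get_mount_info

    def has_mount_opt(path, opt: str) -> bool:
        """Sketch: True when `opt` appears in the mount options of `path`."""
        # last tuple element is e.g. "rw,relatime,idmapped"
        *_, mnt_opts = get_mount_info(path, get_mnt_opts=True)
        return opt in mnt_opts.split(",")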
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
index 4635ca3f..e7094ec5 100644
--- a/tests/unittests/util.py
+++ b/tests/unittests/util.py
@@ -23,7 +23,7 @@ def get_cloud(
cls = distros.fetch(distro) if distro else MockDistro
mydist = cls(distro, sys_cfg, paths)
if mocked_distro:
- mydist = mock.Mock(wraps=mydist)
+ mydist = mock.MagicMock(wraps=mydist)
myds = DataSourceTesting(sys_cfg, mydist, paths)
if metadata:
myds.metadata.update(metadata)
@@ -148,6 +148,9 @@ class MockDistro(distros.Distro):
def update_package_sources(self):
return (True, "yay")
+ def do_as(self, command, args=None, **kwargs):
+ return ("stdout", "stderr")
+
TEST_INSTANCE_ID = "i-testing"
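get_cloud() now wraps the distro in a MagicMock rather than a plain Mock; the difference matters once dunder methods are involved, as this small illustration shows:

    from unittest import mock

    magic = mock.MagicMock()
    assert len(magic) == 0   # MagicMock preconfigures __len__ and friends
    try:
        len(mock.Mock())     # plain Mock does not, so this raises
    except TypeError:
        pass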
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 271a4710..2826b9d8 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -1,5 +1,6 @@
aciba90
ader1990
+adobley
ajmyyra
akutz
AlexBaranowski
@@ -28,6 +29,7 @@ Conan-Kudo
cvstealth
dankenigsberg
david-caro
+dbungert
ddymko
dermotbradley
dhensby
@@ -40,12 +42,16 @@ esposem
GabrielNagy
garzdin
giggsoff
+glyg
hamalq
holmanb
impl
irishgordo
+ITJamie
+ixjhuang
izzyleung
j5awry
+Jehops
jf
Jille
JohnKepplers
@@ -73,6 +79,7 @@ maxnet
megian
michaelrommel
mitechie
+mxwebdev
nazunalika
netcho
nicolasbock
@@ -81,16 +88,20 @@ olivierlemasle
omBratteng
onitake
Oursin
+outscale-mdr
qubidt
RedKrieg
renanrodrigo
rhansen
riedel
+rmhsawyer
rongz609
+s-makin
SadeghHayeri
sarahwzadara
scorpion44
shaardie
+shell-skrimp
shi2wei3
slingamn
slyon
@@ -109,6 +120,7 @@ tnt-dev
tomponline
tsanghan
tSU-RooT
+tylerschultz
vorlonofportland
vteratipally
Vultaire
diff --git a/tools/check_json_format.sh b/tools/check_json_format.sh
new file mode 100755
index 00000000..62f7d6cd
--- /dev/null
+++ b/tools/check_json_format.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+#
+# Run python's json.tool and check for changes
+#
+# requires python 3.9 for --indent
+#
+file=$1
+before=$(cat "$file") &&
+ python3 -m json.tool --indent 2 "$file" "$file" &&
+ after=$(cat "$file") &&
+ test "$before" = "$after"
diff --git a/tools/ds-identify b/tools/ds-identify
index b4e434c3..0b9f9a8a 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -127,7 +127,7 @@ DI_DSNAME=""
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \
OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware \
-LXD"
+LXD NWCS"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -736,7 +736,6 @@ probe_floppy() {
return "${STATE_FLOPPY_PROBED}"
}
-
dscheck_CloudStack() {
is_container && return ${DS_NOT_FOUND}
dmi_product_name_matches "CloudStack*" && return $DS_FOUND
@@ -1135,6 +1134,13 @@ ec2_identify_platform() {
e24cloud) _RET="E24cloud"; return 0;;
esac
+
+ local product_name="${DI_DMI_PRODUCT_NAME}"
+ if [ "${product_name}" = "3DS Outscale VM" ] && \
+ [ "${vendor}" = "3DS Outscale" ]; then
+ _RET="Outscale"; return 0
+ fi
+
# AWS http://docs.aws.amazon.com/AWSEC2/
# latest/UserGuide/identify_ec2_instances.html
local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid"
@@ -1240,6 +1246,10 @@ dscheck_OpenStack() {
return ${DS_FOUND}
fi
+ if dmi_chassis_asset_tag_matches "HUAWEICLOUD"; then
+ return ${DS_FOUND}
+ fi
+
# LP: #1669875 : allow identification of OpenStack by asset tag
if dmi_chassis_asset_tag_matches "$nova"; then
return ${DS_FOUND}
@@ -1340,6 +1350,11 @@ dscheck_Hetzner() {
return ${DS_NOT_FOUND}
}
+dscheck_NWCS() {
+ dmi_sys_vendor_is NWCS && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
dscheck_Oracle() {
local asset_tag="OracleCloud.com"
dmi_chassis_asset_tag_matches "${asset_tag}" && return ${DS_FOUND}
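The new ds-identify checks (HUAWEICLOUD asset tag, NWCS vendor, Outscale product/vendor pair) all reduce to string matches on DMI fields. A rough Python analogue, assuming the standard sysfs DMI paths rather than ds-identify's own helpers:

    from pathlib import Path

    def dmi(field: str) -> str:
        """Read a DMI field from sysfs, e.g. sys_vendor, chassis_asset_tag."""
        try:
            return Path("/sys/class/dmi/id", field).read_text().strip()
        except OSError:
            return ""

    def is_huawei_openstack() -> bool:
        return dmi("chassis_asset_tag") == "HUAWEICLOUD"

    def is_nwcs() -> bool:
        return dmi("sys_vendor") == "NWCS"

    def is_outscale() -> bool:
        return (dmi("product_name") == "3DS Outscale VM"
                and dmi("sys_vendor") == "3DS Outscale")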
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 4ac1ea4f..0a9992c7 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -194,7 +194,7 @@ class HTTPServerV6(HTTPServer):
address_family = socket.AF_INET6
-class MetaDataHandler(object):
+class MetaDataHandler:
def __init__(self, opts):
self.opts = opts
self.instances = {}
@@ -306,7 +306,7 @@ class MetaDataHandler(object):
return NOT_IMPL_RESPONSE
-class UserDataHandler(object):
+class UserDataHandler:
def __init__(self, opts):
self.opts = opts
diff --git a/tools/read-version b/tools/read-version
index c5cd153f..9eaecb33 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -39,20 +39,20 @@ def which(program):
def is_gitdir(path):
# Return boolean indicating if path is a git tree.
- git_meta = os.path.join(path, '.git')
+ git_meta = os.path.join(path, ".git")
if os.path.isdir(git_meta):
return True
if os.path.exists(git_meta):
# in a git worktree, .git is a file with 'gitdir: x'
with open(git_meta, "rb") as fp:
- if b'gitdir:' in fp.read():
+ if b"gitdir:" in fp.read():
return True
return False
-use_long = '--long' in sys.argv or os.environ.get('CI_RV_LONG')
-use_tags = '--tags' in sys.argv or os.environ.get('CI_RV_TAGS')
-output_json = '--json' in sys.argv
+use_long = "--long" in sys.argv or os.environ.get("CI_RV_LONG")
+use_tags = "--tags" in sys.argv or os.environ.get("CI_RV_TAGS")
+output_json = "--json" in sys.argv
src_version = ci_version.version_string()
version_long = None
@@ -60,32 +60,48 @@ version_long = None
# If we're performing CI for a new release branch (which our tooling creates
# with an "upstream/" prefix), then we don't want to enforce strict version
# matching because we know it will fail.
-is_release_branch_ci = (
+github_ci_release_br = bool(
+ os.environ.get("GITHUB_HEAD_REF", "").startswith(f"upstream/{src_version}")
+)
+travis_ci_release_br = bool(
os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/")
)
+is_release_branch_ci = bool(github_ci_release_br or travis_ci_release_br)
+
if is_gitdir(_tdir) and which("git") and not is_release_branch_ci:
- flags = []
- if use_tags:
- flags = ['--tags']
- cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags
-
- try:
- version = tiny_p(cmd).strip()
- except RuntimeError:
- version = None
-
- if version is None or not version.startswith(src_version):
- sys.stderr.write("git describe version (%s) differs from "
- "cloudinit.version (%s)\n" % (version, src_version))
- sys.stderr.write(
- "Please get the latest upstream tags.\n"
- "As an example, this can be done with the following:\n"
- "$ git remote add upstream https://git.launchpad.net/cloud-init\n"
- "$ git fetch upstream --tags\n"
- )
- sys.exit(1)
-
- version_long = tiny_p(cmd + ["--long"]).strip()
+ # This cmd can be simplified to ["git", "branch", "--show-current"]
+ # after bionic EOL.
+ branch_name = tiny_p(["git", "rev-parse", "--abbrev-ref", "HEAD"]).strip()
+ if branch_name.startswith(f"upstream/{src_version}"):
+ version = src_version
+ version_long = None
+ else:
+
+ flags = []
+ if use_tags:
+ flags = ["--tags"]
+ cmd = ["git", "describe", "--abbrev=8", "--match=[0-9]*"] + flags
+
+ try:
+ version = tiny_p(cmd).strip()
+ except RuntimeError:
+ version = None
+
+ if version is None or not version.startswith(src_version):
+ sys.stderr.write(
+ f"git describe version ({version}) differs from "
+ f"cloudinit.version ({src_version})\n"
+ )
+ sys.stderr.write(
+ "Please get the latest upstream tags.\n"
+ "As an example, this can be done with the following:\n"
+ "$ git remote add upstream https://git.launchpad.net/"
+ "cloud-init\n"
+ "$ git fetch upstream --tags\n"
+ )
+ sys.exit(1)
+
+ version_long = tiny_p(cmd + ["--long"]).strip()
else:
version = src_version
version_long = None
@@ -105,13 +121,13 @@ if version_long:
commit = commit[1:]
data = {
- 'release': release,
- 'version': version,
- 'version_long': version_long,
- 'extra': extra,
- 'commit': commit,
- 'distance': distance,
- 'is_release_branch_ci': is_release_branch_ci,
+ "release": release,
+ "version": version,
+ "version_long": version_long,
+ "extra": extra,
+ "commit": commit,
+ "distance": distance,
+ "is_release_branch_ci": is_release_branch_ci,
}
if output_json:
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index eae83217..c04daead 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -21,6 +21,7 @@ def main():
"fedora",
"freebsd",
"gentoo",
+ "mariner",
"miraclelinux",
"netbsd",
"openbsd",
diff --git a/tox.ini b/tox.ini
index 21527ff3..dd7973b7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -34,6 +34,10 @@ types-requests==2.27.8
types-setuptools==57.4.9
typing-extensions==4.1.1
+[files]
+schema = cloudinit/config/schemas/schema-cloud-config-v1.json
+version = cloudinit/config/schemas/versions.schema.cloud-config.json
+
[testenv:flake8]
deps =
flake8=={[format_deps]flake8}
@@ -123,6 +127,8 @@ deps =
commands =
{envpython} -m isort .
{envpython} -m black .
+ {envpython} -m json.tool --indent 2 {[files]schema} {[files]schema}
+ {envpython} -m json.tool --indent 2 {[files]version} {[files]version}
[testenv:do_format_tip]
deps =
@@ -165,17 +171,14 @@ commands = {envpython} -X tracemalloc=40 -Wall -m pytest \
tests/unittests}
-[lowest-supported-deps]
+[testenv:lowest-supported]
# Tox is going to install requirements from pip. This is fine for
# testing python version compatibility, but when we build cloud-init, we are
# building against the dependencies in the OS repo, not pip. The OS
# dependencies will generally be older than what is found in pip.
# To obtain these versions, check the versions of these libraries
-# in the oldest support Ubuntu distro.
-
-# httpretty isn't included here because python2.7 requires a higher version
-# than whats run on bionic, so we need two different definitions.
+# in the oldest supported Ubuntu distro. These versions are from bionic.
deps =
jinja2==2.10
oauthlib==2.0.6
@@ -193,22 +196,6 @@ deps =
# Needed by pytest and default causes failures
attrs==17.4.0
responses==0.5.1
-
-[testenv:lowest-supported]
-# This definition will run on bionic with the version of httpretty
-# that runs there
-deps =
- {[lowest-supported-deps]deps}
- httpretty==0.8.14
-commands = {[testenv:py3]commands}
-
-[testenv:lowest-supported-dev]
-# The oldest httpretty version to work with Python 3.7+ is 0.9.5,
-# because it is the first to include this commit:
-# https://github.com/gabrielfalcao/HTTPretty/commit/5776d97da3992b9071db5e21faf175f6e8729060
-deps =
- {[lowest-supported-deps]deps}
- httpretty==0.9.5
commands = {[testenv:py3]commands}
[testenv:doc]