author     Alberto Contreras <alberto.contreras@canonical.com>  2023-02-24 10:06:27 +0100
committer  Alberto Contreras <alberto.contreras@canonical.com>  2023-02-24 10:06:27 +0100
commit     5298edba0671019f0324db9466a0238342cef3cf (patch)
tree       b6ab6f3325b7f2a10c56bb110f6e742c970dfe9d
parent     14d6358e9f1d7929e9e49b01ebaf2b0077c9d0ff (diff)
parent     242f6bc43a9e8130a3f6e1fe2082ee1dd9211e07 (diff)
download   cloud-init-git-5298edba0671019f0324db9466a0238342cef3cf.tar.gz

merge from 23.1 at 23.1
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md2
-rw-r--r--.github/workflows/check_format.yml38
-rw-r--r--.github/workflows/integration.yml84
-rw-r--r--.github/workflows/unit.yml34
-rw-r--r--.travis.yml124
-rw-r--r--CONTRIBUTING.rst172
-rw-r--r--ChangeLog219
-rw-r--r--README.md8
-rw-r--r--SECURITY.md4
-rwxr-xr-xcloudinit/cmd/clean.py14
-rwxr-xr-xcloudinit/cmd/devel/net_convert.py10
-rw-r--r--cloudinit/cmd/status.py20
-rw-r--r--cloudinit/config/cc_ansible.py2
-rw-r--r--cloudinit/config/cc_ca_certs.py165
-rw-r--r--cloudinit/config/cc_disk_setup.py80
-rw-r--r--cloudinit/config/cc_ntp.py12
-rw-r--r--cloudinit/config/cc_puppet.py59
-rw-r--r--cloudinit/config/cc_resizefs.py19
-rw-r--r--cloudinit/config/cc_resolv_conf.py23
-rw-r--r--cloudinit/config/cc_runcmd.py2
-rw-r--r--cloudinit/config/cc_set_hostname.py2
-rw-r--r--cloudinit/config/cc_set_passwords.py71
-rw-r--r--cloudinit/config/cc_ssh.py11
-rw-r--r--cloudinit/config/cc_wireguard.py2
-rw-r--r--cloudinit/config/cc_write_files.py13
-rw-r--r--cloudinit/config/cc_yum_add_repo.py2
-rw-r--r--cloudinit/config/cc_zypper_add_repo.py14
-rw-r--r--cloudinit/config/schema.py220
-rw-r--r--cloudinit/config/schemas/schema-cloud-config-v1.json105
-rw-r--r--cloudinit/distros/OpenCloudOS.py9
-rw-r--r--cloudinit/distros/TencentOS.py9
-rw-r--r--cloudinit/distros/__init__.py44
-rw-r--r--cloudinit/distros/freebsd.py44
-rw-r--r--cloudinit/distros/networking.py1
-rw-r--r--cloudinit/distros/openEuler.py4
-rw-r--r--cloudinit/distros/opensuse-leap.py14
-rw-r--r--cloudinit/distros/opensuse-microos.py14
-rw-r--r--cloudinit/distros/opensuse-tumbleweed.py14
-rw-r--r--cloudinit/distros/opensuse.py77
-rw-r--r--cloudinit/distros/parsers/ifconfig.py26
-rw-r--r--cloudinit/distros/rhel.py1
-rw-r--r--cloudinit/distros/sle-micro.py14
-rw-r--r--cloudinit/distros/sle_hpc.py14
-rw-r--r--cloudinit/dmi.py28
-rw-r--r--cloudinit/features.py21
-rw-r--r--cloudinit/net/__init__.py54
-rw-r--r--cloudinit/net/activators.py2
-rw-r--r--cloudinit/net/bsd.py66
-rw-r--r--cloudinit/net/ephemeral.py6
-rw-r--r--cloudinit/net/freebsd.py23
-rw-r--r--cloudinit/net/netplan.py63
-rw-r--r--cloudinit/net/network_state.py19
-rw-r--r--cloudinit/net/networkd.py51
-rw-r--r--cloudinit/net/sysconfig.py2
-rw-r--r--cloudinit/safeyaml.py46
-rw-r--r--cloudinit/sources/DataSourceAliYun.py5
-rw-r--r--cloudinit/sources/DataSourceAzure.py424
-rw-r--r--cloudinit/sources/DataSourceEc2.py90
-rw-r--r--cloudinit/sources/DataSourceLXD.py28
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py5
-rw-r--r--cloudinit/sources/DataSourceOVF.py554
-rw-r--r--cloudinit/sources/DataSourceOpenStack.py78
-rw-r--r--cloudinit/sources/DataSourceVMware.py196
-rw-r--r--cloudinit/sources/DataSourceVultr.py10
-rw-r--r--cloudinit/sources/__init__.py11
-rw-r--r--cloudinit/sources/azure/__init__.py0
-rw-r--r--cloudinit/sources/azure/imds.py156
-rw-r--r--cloudinit/sources/helpers/azure.py21
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config.py6
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py470
-rw-r--r--cloudinit/sources/helpers/vultr.py26
-rw-r--r--cloudinit/ssh_util.py35
-rw-r--r--cloudinit/stages.py17
-rw-r--r--cloudinit/url_helper.py8
-rw-r--r--cloudinit/user_data.py1
-rw-r--r--cloudinit/util.py77
-rw-r--r--cloudinit/version.py2
-rw-r--r--config/cloud.cfg.tmpl22
-rw-r--r--doc-requirements.txt2
-rw-r--r--doc/examples/cloud-config-ansible-controller.txt4
-rw-r--r--doc/examples/cloud-config-ansible-pull.txt4
-rw-r--r--doc/examples/cloud-config-ca-certs.txt11
-rw-r--r--doc/examples/cloud-config-datasources.txt7
-rw-r--r--doc/examples/cloud-config-disk-setup.txt48
-rw-r--r--doc/rtd/conf.py25
-rw-r--r--doc/rtd/development/code_review.rst (renamed from doc/rtd/topics/code_review.rst)207
-rw-r--r--doc/rtd/development/contributing.rst (renamed from doc/rtd/topics/contributing.rst)1
-rw-r--r--doc/rtd/development/debugging.rst325
-rw-r--r--doc/rtd/development/dir_layout.rst (renamed from doc/rtd/topics/dir_layout.rst)34
-rw-r--r--doc/rtd/development/docs.rst136
-rw-r--r--doc/rtd/development/index.rst47
-rw-r--r--doc/rtd/development/integration_tests.rst214
-rw-r--r--doc/rtd/development/logging.rst278
-rw-r--r--doc/rtd/development/module_creation.rst (renamed from doc/rtd/topics/module_creation.rst)30
-rw-r--r--doc/rtd/development/testing.rst (renamed from doc/rtd/topics/testing.rst)131
-rw-r--r--doc/rtd/explanation/analyze.rst352
-rw-r--r--doc/rtd/explanation/boot.rst263
-rw-r--r--doc/rtd/explanation/configuration.rst81
-rw-r--r--doc/rtd/explanation/events.rst95
-rw-r--r--doc/rtd/explanation/format.rst224
-rw-r--r--doc/rtd/explanation/index.rst20
-rw-r--r--doc/rtd/explanation/instancedata.rst (renamed from doc/rtd/topics/instancedata.rst)509
-rw-r--r--doc/rtd/explanation/kernel-cmdline.rst69
-rw-r--r--doc/rtd/explanation/security.rst (renamed from doc/rtd/topics/security.rst)4
-rw-r--r--doc/rtd/explanation/vendordata.rst71
-rw-r--r--doc/rtd/googleaf254801a5285c31.html1
-rw-r--r--doc/rtd/howto/bugs.rst115
-rw-r--r--doc/rtd/howto/index.rst23
-rw-r--r--doc/rtd/howto/module_run_frequency.rst40
-rw-r--r--doc/rtd/howto/predeploy_testing.rst141
-rw-r--r--doc/rtd/index.rst145
-rw-r--r--doc/rtd/reference/availability.rst (renamed from doc/rtd/topics/availability.rst)16
-rw-r--r--doc/rtd/reference/base_config_reference.rst (renamed from doc/rtd/topics/base_config_reference.rst)232
-rw-r--r--doc/rtd/reference/cli.rst416
-rw-r--r--doc/rtd/reference/datasources.rst134
-rw-r--r--doc/rtd/reference/datasources/aliyun.rst111
-rw-r--r--doc/rtd/reference/datasources/altcloud.rst89
-rw-r--r--doc/rtd/reference/datasources/azure.rst133
-rw-r--r--doc/rtd/reference/datasources/cloudsigma.rst38
-rw-r--r--doc/rtd/reference/datasources/cloudstack.rst66
-rw-r--r--doc/rtd/reference/datasources/configdrive.rst141
-rw-r--r--doc/rtd/reference/datasources/digitalocean.rst32
-rw-r--r--doc/rtd/reference/datasources/e24cloud.rst10
-rw-r--r--doc/rtd/reference/datasources/ec2.rst153
-rw-r--r--doc/rtd/reference/datasources/exoscale.rst83
-rw-r--r--doc/rtd/reference/datasources/fallback.rst19
-rw-r--r--doc/rtd/reference/datasources/gce.rst52
-rw-r--r--doc/rtd/reference/datasources/lxd.rst114
-rw-r--r--doc/rtd/reference/datasources/maas.rst (renamed from doc/rtd/topics/datasources/maas.rst)0
-rw-r--r--doc/rtd/reference/datasources/nocloud.rst221
-rw-r--r--doc/rtd/reference/datasources/nwcs.rst28
-rw-r--r--doc/rtd/reference/datasources/opennebula.rst (renamed from doc/rtd/topics/datasources/opennebula.rst)77
-rw-r--r--doc/rtd/reference/datasources/openstack.rst128
-rw-r--r--doc/rtd/reference/datasources/oracle.rst53
-rw-r--r--doc/rtd/reference/datasources/ovf.rst12
-rw-r--r--doc/rtd/reference/datasources/rbxcloud.rst23
-rw-r--r--doc/rtd/reference/datasources/smartos.rst181
-rw-r--r--doc/rtd/reference/datasources/upcloud.rst22
-rw-r--r--doc/rtd/reference/datasources/vmware.rst451
-rw-r--r--doc/rtd/reference/datasources/vultr.rst32
-rw-r--r--doc/rtd/reference/datasources/zstack.rst36
-rw-r--r--doc/rtd/reference/examples.rst (renamed from doc/rtd/topics/examples.rst)15
-rw-r--r--doc/rtd/reference/faq.rst303
-rw-r--r--doc/rtd/reference/index.rst21
-rw-r--r--doc/rtd/reference/merging.rst292
-rw-r--r--doc/rtd/reference/modules.rst (renamed from doc/rtd/topics/modules.rst)38
-rw-r--r--doc/rtd/reference/network-config-format-eni.rst (renamed from doc/rtd/topics/network-config-format-eni.rst)15
-rw-r--r--doc/rtd/reference/network-config-format-v1.rst647
-rw-r--r--doc/rtd/reference/network-config-format-v2.rst (renamed from doc/rtd/topics/network-config-format-v2.rst)387
-rw-r--r--doc/rtd/reference/network-config.rst329
-rw-r--r--doc/rtd/topics/analyze.rst316
-rw-r--r--doc/rtd/topics/boot.rst246
-rw-r--r--doc/rtd/topics/bugs.rst108
-rw-r--r--doc/rtd/topics/cli.rst344
-rw-r--r--doc/rtd/topics/configuration.rst79
-rw-r--r--doc/rtd/topics/datasources.rst114
-rw-r--r--doc/rtd/topics/datasources/aliyun.rst89
-rw-r--r--doc/rtd/topics/datasources/altcloud.rst94
-rw-r--r--doc/rtd/topics/datasources/azure.rst125
-rw-r--r--doc/rtd/topics/datasources/cloudsigma.rst42
-rw-r--r--doc/rtd/topics/datasources/cloudstack.rst54
-rw-r--r--doc/rtd/topics/datasources/configdrive.rst133
-rw-r--r--doc/rtd/topics/datasources/digitalocean.rst32
-rw-r--r--doc/rtd/topics/datasources/e24cloud.rst9
-rw-r--r--doc/rtd/topics/datasources/ec2.rst141
-rw-r--r--doc/rtd/topics/datasources/exoscale.rst69
-rw-r--r--doc/rtd/topics/datasources/fallback.rst18
-rw-r--r--doc/rtd/topics/datasources/gce.rst42
-rw-r--r--doc/rtd/topics/datasources/lxd.rst110
-rw-r--r--doc/rtd/topics/datasources/nocloud.rst161
-rw-r--r--doc/rtd/topics/datasources/nwcs.rst30
-rw-r--r--doc/rtd/topics/datasources/openstack.rst93
-rw-r--r--doc/rtd/topics/datasources/oracle.rst49
-rw-r--r--doc/rtd/topics/datasources/ovf.rst46
-rw-r--r--doc/rtd/topics/datasources/rbxcloud.rst23
-rw-r--r--doc/rtd/topics/datasources/smartos.rst170
-rw-r--r--doc/rtd/topics/datasources/upcloud.rst24
-rw-r--r--doc/rtd/topics/datasources/vmware.rst358
-rw-r--r--doc/rtd/topics/datasources/vultr.rst35
-rw-r--r--doc/rtd/topics/datasources/zstack.rst37
-rw-r--r--doc/rtd/topics/debugging.rst265
-rw-r--r--doc/rtd/topics/docs.rst76
-rw-r--r--doc/rtd/topics/events.rst95
-rw-r--r--doc/rtd/topics/faq.rst430
-rw-r--r--doc/rtd/topics/format.rst214
-rw-r--r--doc/rtd/topics/integration_tests.rst208
-rw-r--r--doc/rtd/topics/kernel-cmdline.rst71
-rw-r--r--doc/rtd/topics/logging.rst267
-rw-r--r--doc/rtd/topics/merging.rst288
-rw-r--r--doc/rtd/topics/network-config-format-v1.rst625
-rw-r--r--doc/rtd/topics/network-config.rst326
-rw-r--r--doc/rtd/topics/tutorial.rst141
-rw-r--r--doc/rtd/topics/vendordata.rst73
-rw-r--r--doc/rtd/tutorial/index.rst37
-rw-r--r--doc/rtd/tutorial/lxd.rst175
-rw-r--r--doc/rtd/tutorial/qemu-debugging.rst41
-rwxr-xr-xdoc/rtd/tutorial/qemu-script.sh47
-rw-r--r--doc/rtd/tutorial/qemu.rst292
-rw-r--r--integration-requirements.txt2
-rw-r--r--packages/README.md17
-rwxr-xr-xpackages/bddeb6
-rw-r--r--packages/pkg-deps.json7
-rw-r--r--pyproject.toml2
-rw-r--r--setup.py8
-rw-r--r--systemd/cloud-init-generator.tmpl2
-rw-r--r--systemd/cloud-init.service.tmpl5
-rwxr-xr-xsysvinit/freebsd/cloudconfig4
-rwxr-xr-xsysvinit/freebsd/cloudfinal4
-rwxr-xr-xsysvinit/freebsd/cloudinit2
-rwxr-xr-xsysvinit/freebsd/cloudinitlocal4
-rw-r--r--templates/chrony.conf.opensuse-leap.tmpl38
-rw-r--r--templates/chrony.conf.opensuse-microos.tmpl38
-rw-r--r--templates/chrony.conf.opensuse-tumbleweed.tmpl38
-rw-r--r--templates/chrony.conf.sle-micro.tmpl38
-rw-r--r--templates/chrony.conf.sle_hpc.tmpl38
-rw-r--r--tests/data/netinfo/freebsd-duplicate-macs-ifconfig-output13
-rw-r--r--tests/data/netinfo/freebsd-ifconfig-output2
-rw-r--r--tests/data/vmware/cust-dhcp-2nic-instance-id.cfg37
-rw-r--r--tests/integration_tests/clouds.py12
-rw-r--r--tests/integration_tests/cmd/test_schema.py27
-rw-r--r--tests/integration_tests/conftest.py2
-rw-r--r--tests/integration_tests/datasources/test_lxd_hotplug.py18
-rw-r--r--tests/integration_tests/integration_settings.py1
-rw-r--r--tests/integration_tests/modules/test_ansible.py11
-rw-r--r--tests/integration_tests/modules/test_ca_certs.py6
-rw-r--r--tests/integration_tests/modules/test_cli.py2
-rw-r--r--tests/integration_tests/modules/test_combined.py40
-rw-r--r--tests/integration_tests/modules/test_lxd.py5
-rw-r--r--tests/integration_tests/modules/test_puppet.py7
-rw-r--r--tests/integration_tests/modules/test_set_password.py9
-rw-r--r--tests/integration_tests/modules/test_ssh_keys_provided.py13
-rw-r--r--tests/integration_tests/modules/test_write_files.py29
-rw-r--r--tests/integration_tests/util.py10
-rw-r--r--tests/unittests/cmd/test_clean.py24
-rw-r--r--tests/unittests/cmd/test_cloud_id.py11
-rw-r--r--tests/unittests/cmd/test_status.py34
-rw-r--r--tests/unittests/config/test_apt_source_v3.py3
-rw-r--r--tests/unittests/config/test_cc_ansible.py2
-rw-r--r--tests/unittests/config/test_cc_ca_certs.py251
-rw-r--r--tests/unittests/config/test_cc_disk_setup.py45
-rw-r--r--tests/unittests/config/test_cc_growpart.py7
-rw-r--r--tests/unittests/config/test_cc_grub_dpkg.py12
-rw-r--r--tests/unittests/config/test_cc_package_update_upgrade_install.py19
-rw-r--r--tests/unittests/config/test_cc_power_state_change.py14
-rw-r--r--tests/unittests/config/test_cc_puppet.py194
-rw-r--r--tests/unittests/config/test_cc_resizefs.py23
-rw-r--r--tests/unittests/config/test_cc_scripts_vendor.py7
-rw-r--r--tests/unittests/config/test_cc_set_hostname.py17
-rw-r--r--tests/unittests/config/test_cc_set_passwords.py244
-rw-r--r--tests/unittests/config/test_cc_ssh.py25
-rw-r--r--tests/unittests/config/test_cc_update_etc_hosts.py7
-rw-r--r--tests/unittests/config/test_cc_users_groups.py48
-rw-r--r--tests/unittests/config/test_schema.py318
-rw-r--r--tests/unittests/conftest.py16
-rw-r--r--tests/unittests/distros/test__init__.py96
-rw-r--r--tests/unittests/distros/test_freebsd.py34
-rw-r--r--tests/unittests/distros/test_ifconfig.py16
-rw-r--r--tests/unittests/distros/test_netconfig.py137
-rw-r--r--tests/unittests/distros/test_opensuse.py333
-rw-r--r--tests/unittests/net/test_dhcp.py16
-rw-r--r--tests/unittests/net/test_network_state.py141
-rw-r--r--tests/unittests/net/test_networkd.py57
-rw-r--r--tests/unittests/sources/azure/test_imds.py491
-rw-r--r--tests/unittests/sources/helpers/test_cloudsigma.py67
-rw-r--r--tests/unittests/sources/test_aliyun.py13
-rw-r--r--tests/unittests/sources/test_azure.py952
-rw-r--r--tests/unittests/sources/test_azure_helper.py68
-rw-r--r--tests/unittests/sources/test_ec2.py19
-rw-r--r--tests/unittests/sources/test_init.py10
-rw-r--r--tests/unittests/sources/test_lxd.py55
-rw-r--r--tests/unittests/sources/test_opennebula.py4
-rw-r--r--tests/unittests/sources/test_openstack.py118
-rw-r--r--tests/unittests/sources/test_ovf.py745
-rw-r--r--tests/unittests/sources/test_vmware.py709
-rw-r--r--tests/unittests/sources/test_vultr.py76
-rw-r--r--tests/unittests/sources/vmware/test_vmware_config_file.py44
-rw-r--r--tests/unittests/test_atomic_helper.py10
-rw-r--r--tests/unittests/test_cli.py6
-rw-r--r--tests/unittests/test_dmi.py51
-rw-r--r--tests/unittests/test_ds_identify.py133
-rw-r--r--tests/unittests/test_net.py427
-rw-r--r--tests/unittests/test_net_freebsd.py12
-rw-r--r--tests/unittests/test_safeyaml.py39
-rw-r--r--tests/unittests/test_ssh_util.py38
-rw-r--r--tests/unittests/test_stages.py20
-rw-r--r--tests/unittests/test_util.py197
-rw-r--r--tests/unittests/util.py5
-rw-r--r--tools/.github-cla-signers12
-rwxr-xr-xtools/check-cla-signers14
-rwxr-xr-xtools/ds-identify16
-rwxr-xr-xtools/read-dependencies249
-rwxr-xr-xtools/read-version60
-rwxr-xr-xtools/render-cloudcfg2
-rwxr-xr-xtools/run-container10
-rw-r--r--tox.ini2
295 files changed, 16008 insertions, 11422 deletions
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 017e82e4..8f86cadc 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -27,6 +27,6 @@ setup, and teardown. Scripts used may be attached directly to this PR. -->
## Checklist:
<!-- Go over all the following points, and put an `x` in all the boxes
that apply. -->
- - [ ] My code follows the process laid out in [the documentation](https://cloudinit.readthedocs.io/en/latest/topics/contributing.html)
+ - [ ] My code follows the process laid out in [the documentation](https://cloudinit.readthedocs.io/en/latest/development/contributing.html)
- [ ] I have updated or added any unit tests accordingly
- [ ] I have updated or added any documentation accordingly
diff --git a/.github/workflows/check_format.yml b/.github/workflows/check_format.yml
index 4e7a7271..e6d3c705 100644
--- a/.github/workflows/check_format.yml
+++ b/.github/workflows/check_format.yml
@@ -62,3 +62,41 @@ jobs:
run: |
tools/check_json_format.sh cloudinit/config/schemas/schema-cloud-config-v1.json
tools/check_json_format.sh cloudinit/config/schemas/versions.schema.cloud-config.json
+
+ doc:
+ strategy:
+ fail-fast: false
+ name: Check docs
+ runs-on: ubuntu-22.04
+ steps:
+ - name: "Checkout #1"
+ uses: actions/checkout@v3.0.0
+
+ - name: "Checkout #2 (for tools/read-version)"
+ run: |
+ git fetch --unshallow
+ git remote add upstream https://git.launchpad.net/cloud-init
+ - name: "Install Python 3.10"
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10.8'
+ - name: "Install dependencies"
+ run: |
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -qy update
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -qy install tox lintian
+ - name: "Spellcheck"
+ run: |
+ make check_spelling
+ - name: "Build docs"
+ env:
+ TOXENV: doc
+ run: |
+ tox
+
+ check-cla-signers:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v3.0.0
+
+ - name: Check CLA signers file
+ run: tools/check-cla-signers
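The new doc job runs the same spellcheck and Sphinx build that the Travis configuration (removed further down) used to run. A rough local equivalent, assuming tox and lintian are installed and the documentation requirements from doc-requirements.txt are available, is:

    sudo DEBIAN_FRONTEND=noninteractive apt-get -qy update
    sudo DEBIAN_FRONTEND=noninteractive apt-get -qy install tox lintian
    make check_spelling        # spellcheck target invoked by the workflow
    TOXENV=doc tox             # build the docs via the doc tox environment
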
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
new file mode 100644
index 00000000..889897a3
--- /dev/null
+++ b/.github/workflows/integration.yml
@@ -0,0 +1,84 @@
+name: Integration Tests
+
+on:
+ pull_request:
+ branches-ignore:
+ - 'ubuntu/**'
+
+concurrency:
+ group: 'ci-${{ github.workflow }}-${{ github.ref }}'
+ cancel-in-progress: true
+
+defaults:
+ run:
+ shell: sh -ex {0}
+
+env:
+ RELEASE: bionic
+
+jobs:
+ package-build:
+ runs-on: ubuntu-22.04
+ steps:
+ - name: "Checkout"
+ uses: actions/checkout@v3
+ with:
+ # Fetch all tags for tools/read-version
+ fetch-depth: 0
+ - name: Prepare dependencies
+ run: |
+ sudo DEBIAN_FRONTEND=noninteractive apt-get update
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -y install \
+ debhelper \
+ dh-python \
+ fakeroot \
+ python3-setuptools \
+ sbuild \
+ ubuntu-dev-tools
+ sudo sbuild-adduser $USER
+ cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
+ - name: Build package
+ run: |
+ ./packages/bddeb -S -d --release ${{ env.RELEASE }}
+ sudo -E su $USER -c 'mk-sbuild ${{ env.RELEASE }}'
+ sudo -E su $USER -c 'DEB_BUILD_OPTIONS=nocheck sbuild --nolog --no-run-lintian --no-run-autopkgtest --verbose --dist=${{ env.RELEASE }} --build-dir=${{ runner.temp }} cloud-init_*.dsc'
+ - name: Archive debs as artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: 'cloud-init-${{ env.RELEASE }}-deb'
+ path: '${{ runner.temp }}/cloud-init*.deb'
+ retention-days: 3
+
+ integration-tests:
+ needs: package-build
+ runs-on: ubuntu-22.04
+ steps:
+ - name: "Checkout"
+ uses: actions/checkout@v3
+ with:
+ # Fetch all tags for tools/read-version
+ fetch-depth: 0
+ - name: Retrieve cloud-init package
+ uses: actions/download-artifact@v3
+ with:
+ name: 'cloud-init-${{ env.RELEASE }}-deb'
+ - name: Verify deb package
+ run: |
+ ls -hal cloud-init*.deb
+ - name: Prepare test tools
+ run: |
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -y update
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -y install tox wireguard
+ - name: Initialize LXD
+ run: |
+ ssh-keygen -P "" -q -f ~/.ssh/id_rsa
+ echo "[lxd]" > /home/$USER/.config/pycloudlib.toml
+ sudo adduser $USER lxd
+ # Jammy GH Action runners have docker installed, which edits iptables
+ # in a way that is incompatible with lxd.
+ # https://linuxcontainers.org/lxd/docs/master/howto/network_bridge_firewalld/#prevent-issues-with-lxd-and-docker
+ sudo iptables -I DOCKER-USER -j ACCEPT
+ sudo lxd init --auto
+ - name: Run integration Tests
+ run: |
+ sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls cloud-init*.deb)" tox -e integration-tests-ci'
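The integration-tests job consumes the .deb produced by package-build and points the test suite at it through CLOUD_INIT_CLOUD_INIT_SOURCE. A condensed local sketch, assuming LXD is installed, an SSH key exists, and a cloud-init .deb has already been built (for example with packages/bddeb), is:

    mkdir -p ~/.config && echo "[lxd]" > ~/.config/pycloudlib.toml
    sudo adduser "$USER" lxd
    sudo lxd init --auto
    # run the suite against the locally built package
    sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls cloud-init*.deb)" tox -e integration-tests-ci'
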
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml
new file mode 100644
index 00000000..26c278d5
--- /dev/null
+++ b/.github/workflows/unit.yml
@@ -0,0 +1,34 @@
+name: Unit Tests
+on:
+ pull_request:
+concurrency:
+ group: 'ci-${{ github.workflow }}-${{ github.ref }}'
+ cancel-in-progress: true
+defaults:
+ run:
+ shell: sh -ex {0}
+jobs:
+ unittests:
+ strategy:
+ matrix:
+ python-version: [ "3.6", "3.7", "3.8", "3.9", "3.10", "3.11" ]
+ name: Python ${{matrix.python-version}} unittest
+ runs-on: ubuntu-20.04
+ steps:
+ - name: "Checkout #1"
+ uses: actions/checkout@v3.0.0
+ - name: "Checkout #2 (for tools/read-version)"
+ run: |
+ git fetch --unshallow
+ git remote add upstream https://git.launchpad.net/cloud-init
+ - name: Install Python ${{matrix.python-version}}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{matrix.python-version}}
+ - name: Install tox
+ run: pip install tox
+ - name: Run unittest
+ env:
+ TOXENV: py3
+ PYTEST_ADDOPTS: -v
+ run: tox
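Each matrix entry simply runs the standard py3 tox environment under the selected interpreter. The same invocation locally, for whichever Python is on PATH, would look like:

    pip install tox
    TOXENV=py3 PYTEST_ADDOPTS=-v tox    # -v lists every test run by pytest
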
diff --git a/.travis.yml b/.travis.yml
index 6456204b..b56fdfdc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,27 +1,6 @@
language: python
dist: bionic
-# We use two different caching strategies. The default is to cache pip
-# packages (as most of our jobs use pip packages), which is configured here.
-# For the integration tests, we instead want to cache the lxd images and
-# package build schroot.
-#
-# We cache the lxd images because this saves a few seconds in the general
-# case, but provides substantial speed-ups when cloud-images.ubuntu.com, the
-# source of the images, is under heavy load. The directory in which the lxd
-# images are stored (/var/snap/lxd/common/lxd/images/) is not
-# readable/writeable by the default user (which is a requirement for caching),
-# so we instead cache the `lxd_images/` directory. We move lxd images out of
-# there before we run tests and back in once tests are complete. We _move_ the
-# images out and only copy the most recent lxd image back into the cache, to
-# avoid our cache growing without bound. (We only need the most recent lxd
-# image because the integration tests only use a single image.)
-#
-# We cache the package build schroot because it saves 2-3 minutes per build.
-# Without caching, we have to perform a debootstrap for every build. We update
-# the schroot before storing it back in the cache, to ensure that we aren't
-# just using an increasingly-old schroot as time passes. The cached schroot is
-# stored as a tarball, to preserve permissions/ownership.
cache: pip
install:
@@ -44,111 +23,12 @@ matrix:
fast_finish: true
include:
- python: 3.6
- - name: "Integration Tests"
- if: NOT branch =~ /^ubuntu\//
- env: {}
- cache:
- - directories:
- - lxd_images
- - chroots
- before_cache:
- - |
- # Find the most recent image file
- latest_file="$(sudo ls -Art /var/snap/lxd/common/lxd/images/ | tail -n 1)"
- # This might be <hash>.rootfs or <hash>, normalise
- latest_file="$(basename $latest_file .rootfs)"
- # Find all files with that prefix and copy them to our cache dir
- sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} "$TRAVIS_BUILD_DIR/lxd_images/" \;
- install:
- - git fetch --unshallow
- - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox debhelper wireguard
- - pip install .
- - pip install tox
- # bionic has lxd from deb installed, remove it first to ensure
- # pylxd talks only to the lxd from snap
- - sudo apt remove --purge lxd lxd-client
- - sudo rm -Rf /var/lib/lxd
- - sudo snap install lxd
- - sudo lxd init --auto
- - sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles
- # Move any cached lxd images into lxd's image dir
- - sudo find "$TRAVIS_BUILD_DIR/lxd_images/" -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
- - sudo usermod -a -G lxd $USER
- - sudo sbuild-adduser $USER
- - cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
- - echo "[lxd]" > /home/$USER/.config/pycloudlib.toml
- script:
- # Ubuntu LTS: Build
- - ./packages/bddeb -S -d --release bionic
- - |
- needs_caching=false
- if [ -e "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" ]; then
- # If we have a cached chroot, move it into place
- sudo mkdir -p /var/lib/schroot/chroots/bionic-amd64
- sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" -C /var/lib/schroot/chroots/bionic-amd64
- # Write its configuration
- cat > sbuild-bionic-amd64 << EOM
- [bionic-amd64]
- description=bionic-amd64
- groups=sbuild,root,admin
- root-groups=sbuild,root,admin
- # Uncomment these lines to allow members of these groups to access
- # the -source chroots directly (useful for automated updates, etc).
- #source-root-users=sbuild,root,admin
- #source-root-groups=sbuild,root,admin
- type=directory
- profile=sbuild
- union-type=overlay
- directory=/var/lib/schroot/chroots/bionic-amd64
- EOM
- sudo mv sbuild-bionic-amd64 /etc/schroot/chroot.d/
- sudo chown root /etc/schroot/chroot.d/sbuild-bionic-amd64
- # And ensure it's up-to-date.
- before_pkgs="$(sudo schroot -c source:bionic-amd64 -d / dpkg -l | sha256sum)"
- sudo schroot -c source:bionic-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade"
- after_pkgs=$(sudo schroot -c source:bionic-amd64 -d / dpkg -l | sha256sum)
- if [ "$before_pkgs" != "$after_pkgs" ]; then
- needs_caching=true
- fi
- else
- # Otherwise, create the chroot
- sudo -E su $USER -c 'mk-sbuild bionic'
- needs_caching=true
- fi
- # If there are changes to the schroot (or it's entirely new),
- # tar up the schroot (to preserve ownership/permissions) and
- # move it into the cached dir; no need to compress it because
- # Travis will do that anyway
- if [ "$needs_caching" = "true" ]; then
- sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" -C /var/lib/schroot/chroots/bionic-amd64 .
- fi
- # Use sudo to get a new shell where we're in the sbuild group
- # Don't run integration tests when build fails
- - |
- sudo -E su $USER -c 'DEB_BUILD_OPTIONS=nocheck sbuild --nolog --no-run-lintian --no-run-autopkgtest --verbose --dist=bionic cloud-init_*.dsc' &&
- ssh-keygen -P "" -q -f ~/.ssh/id_rsa &&
- sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci'
- - python: 3.6
env:
TOXENV=lowest-supported
PYTEST_ADDOPTS=-v # List all tests run by pytest
dist: bionic
- - python: 3.10
- env: TOXENV=doc
- install:
- - git fetch --unshallow
- # Not pinning setuptools can cause failures on python 3.7 and 3.8 builds
- # See https://github.com/pypa/setuptools/issues/3118
- - pip install setuptools==59.6.0
- - sudo apt-get install lintian
- - pip install tox
- script:
- - make check_spelling && tox
# Test all supported Python versions (but at the end, so we schedule
# longer-running jobs first)
- python: 3.12-dev
- - python: 3.11-dev
- - python: "3.10"
- - python: 3.9
- - python: 3.8
- - python: 3.7
+ allow_failures:
+ - python: 3.12-dev
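With the integration, doc and per-version unit jobs migrated to GitHub Actions, Travis is left with the lowest-supported and 3.12-dev checks. The surviving job boils down to the following, assuming the lowest-supported environment defined in tox.ini:

    TOXENV=lowest-supported PYTEST_ADDOPTS=-v tox
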
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 62628fd5..6d4cd65f 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,7 +1,9 @@
-Contributing to cloud-init
-**************************
+.. _contributing:
-This document describes how to contribute changes to cloud-init.
+Contributing to ``cloud-init``
+******************************
+
+This document describes how to contribute changes to ``cloud-init``.
It assumes you have a `GitHub`_ account, and refers to your GitHub user
as ``GH_USER`` throughout.
@@ -13,35 +15,31 @@ Summary
Before any pull request can be accepted, you must do the following:
-* Sign the Canonical `contributor license agreement`_
-* Add your Github username (alphabetically) to the in-repository list that we use
- to track CLA signatures:
- `tools/.github-cla-signers`_
-* Add or update any `unit tests`_ accordingly
-* Add or update any `integration tests`_ (if applicable)
-* Format code (using black and isort) with `tox -e do_format`
-* Ensure unit tests and linting pass using `tox`_
-* Submit a PR against the `main` branch of the `cloud-init` repository
-
-.. _unit tests: https://cloudinit.readthedocs.io/en/latest/topics/testing.html
-.. _integration tests: https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
+* Sign the Canonical `contributor license agreement`_.
+* Add your GitHub username (alphabetically) to the in-repository list that we
+ use to track CLA signatures: `tools/.github-cla-signers`_.
+* Add or update any :ref:`unit tests<testing>` accordingly.
+* Add or update any :ref:`integration_tests` (if applicable).
+* Format code (using ``black`` and ``isort``) with `tox -e do_format`.
+* Ensure unit tests and linting pass using `tox`_.
+* Submit a PR against the ``main`` branch of the ``cloud-init`` repository.
The detailed instructions
-------------------------
-Follow these steps to submit your first pull request to cloud-init:
+Follow these steps to submit your first pull request to ``cloud-init``:
-* To contribute to cloud-init, you must sign the Canonical `contributor
- license agreement`_
+* To contribute to ``cloud-init``, you must sign the Canonical
+ `contributor license agreement`_.
* If you have already signed it as an individual, your Launchpad user
will be listed in the `contributor-agreement-canonical`_ group.
- (Unfortunately there is no easy way to check if an organization or
- company you are doing work for has signed.)
+ Unfortunately there is no easy way to check if an organization or
+ company you are doing work for has signed.
* When signing it:
- * ensure that you fill in the GitHub username field.
+ * ensure that you fill in the GitHub username field,
* when prompted for 'Project contact' or 'Canonical Project
Manager', enter 'James Falcon'.
@@ -51,37 +49,38 @@ Follow these steps to submit your first pull request to cloud-init:
* For any questions or help with the process, please email `James
Falcon <mailto:james.falcon@canonical.com>`_ with the subject,
- "Cloud-Init CLA"
+ "Cloud-init CLA".
* You also may contact user ``falcojr`` in the ``#cloud-init``
- channel on the Libera IRC network.
+ channel on the `Libera IRC network`_.
-* Configure git with your email and name for commit messages.
+* Configure ``git`` with your email and name for commit messages.
Your name will appear in commit messages and will also be used in
- changelogs or release notes. Give yourself credit!::
+ changelogs or release notes. Give yourself credit! ::
git config user.name "Your Name"
git config user.email "Your Email"
-* Sign into your `GitHub`_ account
+* Sign in to your `GitHub`_ account.
-* Fork the upstream `repository`_ on Github and clicking on the ``Fork`` button
+* Fork the upstream `repository`_ on GitHub and click on the ``Fork`` button
* Create a new remote pointing to your personal GitHub repository.
- .. code:: sh
+.. code-block:: sh
git clone git@github.com:GH_USER/cloud-init.git
cd cloud-init
git remote add upstream git@github.com:canonical/cloud-init.git
git push origin main
-* Read through the cloud-init `Code Review Process`_, so you understand
- how your changes will end up in cloud-init's codebase.
+* Read through the ``cloud-init``
+ :ref:`Code Review Process<code_review_process>`, so you understand
+ how your changes will end up in ``cloud-init``'s codebase.
-* Submit your first cloud-init pull request, adding your Github username to the
- in-repository list that we use to track CLA signatures:
+* Submit your first ``cloud-init`` pull request, adding your GitHub username
+ to the in-repository list that we use to track CLA signatures:
`tools/.github-cla-signers`_
* See `PR #344`_ and `PR #345`_ for examples of what this pull
@@ -89,60 +88,54 @@ Follow these steps to submit your first pull request to cloud-init:
* Note that ``.github-cla-signers`` is sorted alphabetically.
- * (If you already have a change that you want to submit, you can
- also include the change to ``tools/.github-cla-signers`` in that
- pull request, there is no need for two separate PRs.)
+ * You may use ``tools/check-cla-signers`` to sort ``.github-cla-signers``
+ or check that it is sorted.
-.. _GitHub: https://github.com
-.. _Launchpad: https://launchpad.net
-.. _repository: https://github.com/canonical/cloud-init
-.. _contributor license agreement: https://ubuntu.com/legal/contributors
-.. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members
-.. _PR #344: https://github.com/canonical/cloud-init/pull/344
-.. _PR #345: https://github.com/canonical/cloud-init/pull/345
+ * If you already have a change that you want to submit, you can
+ also include the change to ``tools/.github-cla-signers`` in that
+ pull request, there is no need for two separate PRs.
-Transferring CLA Signatures from Launchpad to Github
+Transferring CLA Signatures from Launchpad to GitHub
----------------------------------------------------
-For existing contributors who have signed the agreement in Launchpad
-before the Github username field was included, we need to verify the
-link between your `Launchpad`_ account and your `GitHub`_ account. To
-enable us to do this, we ask that you create a branch with both your
-Launchpad and GitHub usernames against both the Launchpad and GitHub
-cloud-init repositories. We've added a tool
-(``tools/migrate-lp-user-to-github``) to the cloud-init repository to
-handle this migration as automatically as possible.
+For existing contributors who signed the agreement in Launchpad before the
+GitHub username field was included, we need to verify the link between your
+`Launchpad`_ account and your `GitHub`_ account. To enable us to do this, we
+ask that you create a branch with both your Launchpad and GitHub usernames
+against both the Launchpad and GitHub ``cloud-init`` repositories. We've added
+a tool (``tools/migrate-lp-user-to-github``) to the ``cloud-init`` repository
+to handle this migration as automatically as possible.
-The cloud-init team will review the two merge proposals and verify that
-the CLA has been signed for the Launchpad user and record the
-associated GitHub account.
+The ``cloud-init`` team will review the two merge proposals, verify that the
+CLA has been signed for the Launchpad user, and record the associated GitHub
+account.
.. note::
If you are a first time contributor, you will not need to touch
- Launchpad to contribute to cloud-init: all new CLA signatures are
+ Launchpad to contribute to ``cloud-init``. All new CLA signatures are
handled as part of the GitHub pull request process described above.
Do these things for each feature or bug
=======================================
-* Create a new topic branch for your work::
+* Create a new topic branch for your work: ::
git checkout -b my-topic-branch
* Make and commit your changes (note, you can make multiple commits,
- fixes, more commits.)::
+ fixes, and add more commits.): ::
git commit
-* Apply black and isort formatting rules with `tox`_::
+* Apply ``black`` and ``isort`` formatting rules with `tox`_: ::
tox -e do_format
-* Run unit tests and lint/formatting checks with `tox`_::
+* Run unit tests and lint/formatting checks with `tox`_: ::
tox
-* Push your changes to your personal GitHub repository::
+* Push your changes to your personal GitHub repository: ::
git push -u origin my-topic-branch
@@ -151,17 +144,18 @@ Do these things for each feature or bug
- Open the branch on GitHub
- You can see a web view of your repository and navigate to the branch at:
+ ::
- ``https://github.com/GH_USER/cloud-init/tree/my-topic-branch``
+ https://github.com/GH_USER/cloud-init/tree/my-topic-branch
- - Click 'Pull Request`
+ - Click :guilabel:`Pull Request`.
- Fill out the pull request title, summarizing the change and a longer
- message indicating important details about the changes included, like ::
+ message indicating important details about the changes included, like: ::
Activate the frobnicator.
The frobnicator was previously inactive and now runs by default.
- This may save the world some day. Then, list the bugs you fixed
+ This may save the world some day. Then, list the bugs you fixed
as footers with syntax as shown here.
The commit message should be one summary line of less than
@@ -179,35 +173,32 @@ Do these things for each feature or bug
Note that the project continues to use LP: #NNNNN format for closing
launchpad bugs rather than GitHub Issues.
- - Click 'Create Pull Request`
+ - Click :guilabel:`Create Pull Request`
-Then, a cloud-init committer will review your changes and
-follow up in the pull request. Look at the `Code Review Process`_ doc
-to understand the following steps.
+Then, a ``cloud-init`` committer will review your changes and
+follow up in the pull request. Look at the :ref:`Code Review Process<code_review_process>` doc to understand the following steps.
-Feel free to ping and/or join ``#cloud-init`` on Libera irc if you
+Feel free to ping and/or join ``#cloud-init`` on Libera IRC if you
have any questions.
-.. _tox: https://tox.readthedocs.io/en/latest/
-.. _Code Review Process: https://cloudinit.readthedocs.io/en/latest/topics/code_review.html
-
Design
======
This section captures design decisions that are helpful to know when
-hacking on cloud-init.
+hacking on ``cloud-init``.
-Python Support
+Python support
--------------
-Cloud-init upstream currently supports Python 3.6 and above.
-Cloud-init upstream will stay compatible with a particular python version
+``Cloud-init`` upstream currently supports Python 3.6 and above.
+
+``Cloud-init`` upstream will stay compatible with a particular Python version
for 6 years after release. After 6 years, we will stop testing upstream
-changes against the unsupported version of python and may introduce
+changes against the unsupported version of Python and may introduce
breaking changes. This policy may change as needed.
-The following table lists the cloud-init versions in which the
-minimum python version changed:
+The following table lists the ``cloud-init`` versions in which the
+minimum Python version changed:
================== ==================
Cloud-init version Python version
@@ -226,23 +217,32 @@ Cloud Config Modules
Tests
-----
-Submissions to cloud-init must include testing. See :ref:`testing` for
+Submissions to ``cloud-init`` must include testing. See :ref:`testing` for
details on these requirements.
-Type Annotations
+Type annotations
----------------
-The cloud-init codebase uses Python's annotation support for storing
-type annotations in the style specified by `PEP-484`_ and `PEP-526`_.
+The ``cloud-init`` codebase uses Python's annotation support for storing
+``type`` annotations in the style specified by `PEP-484`_ and `PEP-526`_.
Their use in the codebase is encouraged.
-.. _PEP-484: https://www.python.org/dev/peps/pep-0484/
-.. _PEP-526: https://www.python.org/dev/peps/pep-0526/
-
Feature Flags
-------------
.. automodule:: cloudinit.features
:members:
+.. LINKS:
.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/main/tools/.github-cla-signers
+.. _Libera IRC network: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
+.. _GitHub: https://github.com
+.. _Launchpad: https://launchpad.net
+.. _repository: https://github.com/canonical/cloud-init
+.. _contributor license agreement: https://ubuntu.com/legal/contributors
+.. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members
+.. _PR #344: https://github.com/canonical/cloud-init/pull/344
+.. _PR #345: https://github.com/canonical/cloud-init/pull/345
+.. _tox: https://tox.readthedocs.io/en/latest/
+.. _PEP-484: https://www.python.org/dev/peps/pep-0484/
+.. _PEP-526: https://www.python.org/dev/peps/pep-0526/
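Condensed, the per-change workflow that CONTRIBUTING.rst walks through amounts to the following sequence (a sketch of the commands already shown in the file, with a hypothetical branch name):

    git checkout -b my-topic-branch
    # edit code, unit tests and docs, then commit (multiple commits are fine)
    git commit
    tox -e do_format                      # apply black and isort formatting
    tox                                   # run unit tests and lint/format checks
    git push -u origin my-topic-branch    # then open the pull request on GitHub
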
diff --git a/ChangeLog b/ChangeLog
index b3b45df0..b1157b2b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,160 @@
+23.1
+ - Support transactional-updates for SUSE based distros (#1997)
+ [Robert Schweikert]
+ - Set ownership for new folders in Write Files Module (#1980)
+ [Jack] (LP: #1990513)
+ - add OpenCloudOS and TencentOS support (#1964) [wynnfeng]
+ - lxd: Retry if the server isn't ready (#2025)
+ - test: switch pycloudlib source to pypi (#2024)
+ - test: Fix integration test deprecation message (#2023)
+ - Recognize opensuse-microos, dev tooling fixes [Robert Schweikert]
+ - sources/azure: refactor imds handler into own module (#1977)
+ [Chris Patterson]
+ - docs: deprecation generation support [1/2] (#2013)
+ - add function is_virtual to distro/FreeBSD (#1957) [Mina Galić]
+ - cc_ssh: support multiple hostcertificates (#2018) (LP: #1999164)
+ - Fix minor schema validation regression and fixup typing (#2017)
+ - doc: Reword user data debug section (#2019)
+ - Overhaul/rewrite of certificate handling as follows: (#1962)
+ [dermotbradley] (LP: #1931174)
+ - disk_setup: use byte string when purging the partition table (#2012)
+ [Stefan Prietl]
+ - cli: schema also validate vendordata*.
+ - ci: sort and add checks for cla signers file [Stefan Prietl]
+ - Add "ederst" as contributor (#2010) [Stefan Prietl]
+ - readme: add reference to packages dir (#2001)
+ - docs: update downstream package list (#2002)
+ - docs: add google search verification (#2000) [s-makin]
+ - docs: fix 404 render use default notfound_urls_prefix in RTD conf (#2004)
+ - Fix OpenStack datasource detection on bare metal (#1923)
+ [Alexander Birkner] (LP: #1815990)
+ - docs: add themed RTD 404 page and pointer to readthedocs-hosted (#1993)
+ - schema: fix gpt labels, use type string for GUID (#1995)
+ - cc_disk_setup: code cleanup (#1996)
+ - netplan: keep custom strict perms when 50-cloud-init.yaml exists
+ - cloud-id: better handling of change in datasource files
+ [d1r3ct0r] (LP: #1998998)
+ - tests: Remove restart check from test
+ - Ignore duplicate macs from mscc_felix and fsl_enetc (LP: #1997922)
+ - Warn on empty network key (#1990)
+ - Fix Vultr cloud_interfaces usage (#1986) [eb3095]
+ - cc_puppet: Update puppet service name (#1970) [d1r3ct0r] (LP: #2002969)
+ - docs: Clarify networking docs (#1987)
+ - lint: remove httpretty (#1985) [sxt1001]
+ - cc_set_passwords: Prevent traceback when restarting ssh (#1981)
+ - tests: fix lp1912844 (#1978)
+ - tests: Skip ansible test on bionic (#1984)
+ - Wait for NetworkManager (#1983) [Robert Schweikert]
+ - docs: minor polishing (#1979) [s-makin]
+ - CI: migrate integration-test to GH actions (#1969)
+ - Fix permission of SSH host keys (#1971) [Ron Gebauer]
+ - Fix default route rendering on v2 ipv6 (#1973) (LP: #2003562)
+ - doc: fix path in net_convert command (#1975)
+ - docs: update net_convert docs (#1974)
+ - doc: fix dead link
+ - cc_set_hostname: ignore /var/lib/cloud/data/set-hostname if it's empty
+ (#1967) [Emanuele Giuseppe Esposito]
+ - distros/rhel.py: _read_hostname() missing strip on "hostname" (#1941)
+ [Mark Mielke]
+ - integration tests: add IBM VPC support (SC-1352) (#1915)
+ - machine-id: set to uninitialized to trigger regeneration on clones
+ (LP: #1999680)
+ - sources/azure: retry on connection error when fetching metdata (#1968)
+ [Chris Patterson]
+ - Ensure ssh state accurately obtained (#1966)
+ - bddeb: drop dh-systemd dependency on newer deb-based releases [d1r3ct0r]
+ - doc: fix `config formats` link in cloudsigma.rst (#1960)
+ - Fix wrong subp syntax in cc_set_passwords.py (#1961)
+ - docs: update the PR template link to readthedocs (#1958) [d1r3ct0r]
+ - ci: switch unittests to gh actions (#1956)
+ - Add mount_default_fields for PhotonOS. (#1952) [Shreenidhi Shedi]
+ - sources/azure: minor refactor for metadata source detection logic
+ (#1936) [Chris Patterson]
+ - add "CalvoM" as contributor (#1955) [d1r3ct0r]
+ - ci: doc to gh actions (#1951)
+ - lxd: handle 404 from missing devices route for LXD 4.0 (LP: #2001737)
+ - docs: Diataxis overhaul (#1933) [s-makin]
+ - vultr: Fix issue regarding cache and region codes (#1938) [eb3095]
+ - cc_set_passwords: Move ssh status checking later (SC-1368) (#1909)
+ (LP: #1998526)
+ - Improve Wireguard module idempotency (#1940) [Fabian Lichtenegger-Lukas]
+ - network/netplan: add gateways as on-link when necessary (#1931)
+ [Louis Sautier] (LP: #2000596)
+ - tests: test_lxd assert features.networks.zones when present (#1939)
+ - Use btrfs enquque when available (#1926) [Robert Schweikert]
+ - sources/azure: drop description for report_failure_to_fabric() (#1934)
+ [Chris Patterson]
+ - cc_disk_setup.py: fix MBR single partition creation (#1932)
+ [dermotbradley] (LP: #1851438)
+ - Fix typo with package_update/package_upgrade (#1927) [eb3095]
+ - sources/azure: fix device driver matching for net config (#1914)
+ [Chris Patterson]
+ - BSD: fix duplicate macs in Ifconfig parser (#1917) [Mina Galić]
+ - test: mock dns calls (#1922)
+ - pycloudlib: add lunar support for integration tests (#1928)
+ - nocloud: add support for dmi variable expansion for seedfrom URL
+ (LP: #1994980)
+ - tools: read-version drop extra call to git describe --long
+ - doc: improve cc_write_files doc (#1916)
+ - read-version: When insufficient tags, use cloudinit.version.get_version
+ - mounts: document weird prefix in schema (#1913)
+ - add utility function test cases (#1910) [sxt1001]
+ - test: mock file deletion in dhcp tests (#1911)
+ - Ensure network ready before cloud-init service runs on RHEL (#1893)
+ (LP: #1998655)
+ - docs: add copy button to code blocks (#1890) [s-makin]
+ - netplan: define features.NETPLAN_CONFIG_ROOT_READ_ONLY flag
+ - azure: fix support for systems without az command installed (#1908)
+ - Networking Clarification (#1892)
+ - Fix the distro.osfamily output problem in the openEuler system. (#1895)
+ [sxt1001] (LP: #1999042)
+ - pycloudlib: bump commit dropping azure api smoke test
+ - * net: netplan config root read-only as wifi config can contain creds
+ - autoinstall: clarify docs for users
+ - sources/azure: encode health report as utf-8 (#1897) [Chris Patterson]
+ - Add back gateway4/6 deprecation to docs (#1898)
+ - networkd: Add support for multiple [Route] sections (#1868)
+ [Nigel Kukard]
+ - doc: add qemu tutorial (#1863)
+ - lint: fix tip-flake8 and tip-mypy (#1896)
+ - Add support for setting uid when creating users on FreeBSD (#1888)
+ [einsibjarni]
+ - Fix exception in BSD networking code-path (#1894) [Mina Galić]
+ - Append derivatives to is_rhel list in cloud.cfg.tmpl (#1887) [Louis Abel]
+ - FreeBSD init: use cloudinit_enable as only rcvar (#1875) [Mina Galić]
+ - feat: add support aliyun metadata security harden mode (#1865)
+ [Manasseh Zhou]
+ - docs: uprate analyze to performance page [s-makin]
+ - test: fix lxd preseed managed network config (#1881)
+ - Add support for static IPv6 addresses for FreeBSD (#1839) [einsibjarni]
+ - Make 3.12 failures not fail the build (#1873)
+ - Docs: adding relative links [s-makin]
+ - Update read-version
+ - Fix setup.py to align with PEP 440 versioning replacing trailing
+ - travis: promote 3.11-dev to 3.11 (#1866)
+ - test_cloud_sigma: delete useless test (#1828) [sxt1001]
+ - Add "nkukard" as contributor (#1864) [Nigel Kukard]
+ - tests: ds-id mocks for vmware-rpctool as utility may not exist in env
+ - doc: add how to render new module doc (#1855)
+ - doc: improve module creation explanation (#1851)
+ - Add Support for IPv6 metadata to OpenStack (#1805)
+ [Marvin Vogt] (LP: #1906849)
+ - add xiaoge1001 to .github-cla-signers (#1854) [sxt1001]
+ - network: Deprecate gateway{4,6} keys in network config v2 (#1794)
+ (LP: #1992512)
+ - VMware: Move Guest Customization transport from OVF to VMware (#1573)
+ [PengpengSun]
+ - doc: home page links added (#1852) [s-makin]
+
+22.4.2
+ - status: handle ds not defined in status.json (#1876) (LP: #1997559)
+
+22.4.1
+ - net: skip duplicate mac check for netvsc nic and its VF (#1853)
+ [Anh Vo] (LP: #1844191)
+ - ChangeLog: whitespace cleanup (#1850)
+ - changelog: capture 22.3.1-4 releases
+
22.4
- test: fix pro integration test [Alberto Contreras]
- cc_disk_setup: pass options in correct order to utils (#1829)
@@ -3163,7 +3320,7 @@
- support network rendering to sysconfig (for centos and RHEL)
- write_files: if no permissions are given, just use default without warn.
- user_data: fix error when user-data is not utf-8 decodable (LP: #1532072)
- - fix mcollective module with python3 (LP: #1597699) [Sergii Golovatiuk]
+ - fix mcollective module with python3 (LP: #1597699) [Sergii Golovatiuk]
0.7.6:
- open 0.7.6
@@ -3288,7 +3445,7 @@
filesystems. Useful if attached disks are not formatted (LP: #1218506)
- Fix usage of libselinux-python when selinux is disabled. [Garrett Holmstrom]
- multi_log: only write to /dev/console if it exists [Garrett Holmstrom]
- - config/cloud.cfg: add 'sudo' to list groups for the default user
+ - config/cloud.cfg: add 'sudo' to list groups for the default user
(LP: #1228228)
- documentation fix for use of 'mkpasswd' [Eric Nordlund]
- respect /etc/growroot-disabled file (LP: #1234331)
@@ -3354,7 +3511,7 @@
can be more useful.
0.7.1:
- - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6
+ - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6
- config-drive: map hostname to local-hostname (LP: #1061964)
- landscape: install landscape-client package if not installed.
only take action if cloud-config is present (LP: #1066115)
@@ -3403,14 +3560,14 @@
0.7.0:
- add a 'exception_cb' argument to 'wait_for_url'. If provided, this
method will be called back with the exception received and the message.
- - utilize the 'exception_cb' above to modify the oauth timestamp in
+ - utilize the 'exception_cb' above to modify the oauth timestamp in
DataSourceMAAS requests if a 401 or 403 is received. (LP: #978127)
- catch signals and exit rather than stack tracing
- if logging fails, enable a fallback logger by patching the logging module
- do not 'start networking' in cloud-init-nonet, but add
cloud-init-container job that runs only if in container and emits
net-device-added (LP: #1031065)
- - search only top level dns for 'instance-data' in
+ - search only top level dns for 'instance-data' in
DataSourceEc2 (LP: #1040200)
- add support for config-drive-v2 (LP:#1037567)
- support creating users, including the default user.
@@ -3448,10 +3605,10 @@
reduces reuse and limits future functionality, and makes testing harder)
- removal of global config that defined paths, shared config, now this is
via objects making unit testing testing and global side-effects a non issue
- - creation of a 'helpers.py'
- - this contains an abstraction for the 'lock' like objects that the various
- module/handler running stages use to avoid re-running a given
- module/handler for a given frequency. this makes it separated from
+ - creation of a 'helpers.py'
+ - this contains an abstraction for the 'lock' like objects that the various
+ module/handler running stages use to avoid re-running a given
+ module/handler for a given frequency. this makes it separated from
the actual usage of that object (thus helpful for testing and clear lines
usage and how the actual job is accomplished)
- a common 'runner' class is the main entrypoint using these locks to
@@ -3460,11 +3617,11 @@
- add in a 'paths' object that provides access to the previously global
and/or config based paths (thus providing a single entrypoint object/type
that provides path information)
- - this also adds in the ability to change the path when constructing
- that path 'object' and adding in additional config that can be used to
+ - this also adds in the ability to change the path when constructing
+ that path 'object' and adding in additional config that can be used to
alter the root paths of 'joins' (useful for testing or possibly useful
in chroots?)
- - config options now avaiable that can alter the 'write_root' and the
+ - config options now avaiable that can alter the 'write_root' and the
'read_root' when backing code uses the paths join() function
- add a config parser subclass that will automatically add unknown sections
and return default values (instead of throwing exceptions for these cases)
@@ -3488,7 +3645,7 @@
the passed in logger (its still passed in)
- ensure that all places where exception are caught and where applicable
that the util logexc() is called, so that no exceptions that may occur
- are dropped without first being logged (where it makes sense for this
+ are dropped without first being logged (where it makes sense for this
to happen)
- add a 'requires' file that lists cloud-init dependencies
- applying it in package creation (bdeb and brpm) as well as using it
@@ -3500,12 +3657,12 @@
subp() utility method, which now has an exception type that will provide
detailed information on python 2.6 and 2.7
- forced all code loading, moving, chmod, writing files and other system
- level actions to go through standard set of util functions, this greatly
+ level actions to go through standard set of util functions, this greatly
helps in debugging and determining exactly which system actions cloud-init
is performing
- adjust url fetching and url trying to go through a single function that
reads urls in the new 'url helper' file, this helps in tracing, debugging
- and knowing which urls are being called and/or posted to from with-in
+ and knowing which urls are being called and/or posted to from with-in
cloud-init code
- add in the sending of a 'User-Agent' header for all urls fetched that
do not provide there own header mapping, derive this user-agent from
@@ -3515,7 +3672,7 @@
and defined output that should be easier to parse than a custom format
- add a set of distro specific classes, that handle distro specific actions
that modules and or handler code can use as needed, this is organized into
- a base abstract class with child classes that implement the shared
+ a base abstract class with child classes that implement the shared
functionality. config determines exactly which subclass to load, so it can
be easily extended as needed.
- current functionality
@@ -3527,16 +3684,16 @@
- interface up/down activating
- implemented a debian + ubuntu subclass
- implemented a redhat + fedora subclass
- - adjust the root 'cloud.cfg' file to now have distrobution/path specific
+ - adjust the root 'cloud.cfg' file to now have distrobution/path specific
configuration values in it. these special configs are merged as the normal
config is, but the system level config is not passed into modules/handlers
- modules/handlers must go through the path and distro object instead
- - have the cloudstack datasource test the url before calling into boto to
+ - have the cloudstack datasource test the url before calling into boto to
avoid the long wait for boto to finish retrying and finally fail when
the gateway meta-data address is unavailable
- add a simple mock ec2 meta-data python based http server that can serve a
very simple set of ec2 meta-data back to callers
- - useful for testing or for understanding what the ec2 meta-data
+ - useful for testing or for understanding what the ec2 meta-data
service can provide in terms of data or functionality
- for ssh key and authorized key file parsing add in classes and util
functions that maintain the state of individual lines, allowing for a
@@ -3570,7 +3727,7 @@
- use 'is None' instead of the frowned upon '== None' which picks up a large
set of 'true' cases than is typically desired (ie for objects that have
there own equality)
- - use context managers on locks, tempdir, chdir, file, selinux, umask,
+ - use context managers on locks, tempdir, chdir, file, selinux, umask,
unmounting commands so that these actions do not have to be closed and/or
cleaned up manually in finally blocks, which is typically not done and
will eventually be a bug in the future
@@ -3594,7 +3751,7 @@
- place the rfc 8222 time formatting and uptime repeated pieces of code in the
util module as a set of function with the name 'time_rfc2822'/'uptime'
- separate the pylint+pep8 calling from one tool into two indivudal tools so
- that they can be called independently, add make file sections that can be
+ that they can be called independently, add make file sections that can be
used to call these independently
- remove the support for the old style config that was previously located in
'/etc/ec2-init/ec2-config.cfg', no longer supported!
@@ -3605,12 +3762,12 @@
- use the new defaulting config parser (that will not raise errors on sections
that do not exist or return errors when values are fetched that do not
exist) in the 'puppet' module
- - for config 'modules' add in the ability for the module to provide a list of
+ - for config 'modules' add in the ability for the module to provide a list of
distro names which it is known to work with, if when ran and the distro
being used name does not match one of those in this list, a warning will be
written out saying that this module may not work correctly on this
distrobution
- - for all dynamically imported modules ensure that they are fixed up before
+ - for all dynamically imported modules ensure that they are fixed up before
they are used by ensuring that they have certain attributes, if they do not
have those attributes they will be set to a sensible set of defaults instead
- adjust all 'config' modules and handlers to use the adjusted util functions
@@ -3638,7 +3795,7 @@
- support setting of Acquire::HTTP::Proxy via 'apt_proxy'
- DataSourceEc2: more resilient to slow metadata service
- config change: 'retries' dropped, 'max_wait' added, timeout increased
- - close stdin in all cloud-init programs that are launched at boot
+ - close stdin in all cloud-init programs that are launched at boot
(LP: #903993)
- revert management of /etc/hosts to 0.6.1 style (LP: #890501, LP: #871966)
- write full ssh keys to console for easy machine consumption (LP: #893400)
@@ -3650,7 +3807,7 @@
in the payload parameter. (LP: #874342)
- add test case framework [Mike Milner] (LP: #890851)
- fix pylint warnings [Juerg Haefliger] (LP: #914739)
- - add support for adding and deleting CA Certificates [Mike Milner]
+ - add support for adding and deleting CA Certificates [Mike Milner]
(LP: #915232)
- in ci-info lines, use '.' to indicate empty field for easier machine reading
- support empty lines in "#include" files (LP: #923043)
@@ -3664,17 +3821,17 @@
- DataSourceMaaS: add data source for Ubuntu Machines as a Service (MaaS)
(LP: #942061)
- DataSourceCloudStack: add support for CloudStack datasource [Cosmin Luta]
- - add option 'apt_pipelining' to address issue with S3 mirrors
+ - add option 'apt_pipelining' to address issue with S3 mirrors
(LP: #948461) [Ben Howard]
- warn on non-multipart, non-handled user-data [Martin Packman]
- run resizefs in the background in order to not block boot (LP: #961226)
- Fix bug in Chef support where validation_key was present in config, but
'validation_cert' was not (LP: #960547)
- - Provide user friendly message when an invalid locale is set
+ - Provide user friendly message when an invalid locale is set
[Ben Howard] (LP: #859814)
- Support reading cloud-config from kernel command line parameter and
populating local file with it, which can then provide data for DataSources
- - improve chef examples for working configurations on 11.10 and 12.04
+ - improve chef examples for working configurations on 11.10 and 12.04
[Lorin Hochstein] (LP: #960564)
0.6.2:
@@ -3723,7 +3880,7 @@
This was done by changing all users of util.subp to have None input unless
specified
- Add some debug info to the console when cloud-init runs.
- This is useful if debugging, IP and route information is printed to the
+ This is useful if debugging, IP and route information is printed to the
console.
- change the mechanism for handling .ssh/authorized_keys, to update entries
rather than appending. This ensures that the authorized_keys that are
@@ -3785,7 +3942,7 @@
- moved upstart/cloud-run-user-script.conf to upstart/cloud-final.conf
- cloud-final.conf now runs cloud-config modules similar
to cloud-config and cloud-init.
- - LP: #653271
+ - LP: #653271
- added writing of "boot-finished" to /var/lib/cloud/instance/boot-finished
this is the last thing done, indicating cloud-init is finished booting
- writes message to console with timestamp and uptime
@@ -3808,6 +3965,6 @@
- add support for reading Rightscale style user data (LP: #668400)
and acting on it in cloud-config (cc_rightscale_userdata.py)
- make the message on 'disable_root' more clear (LP: #672417)
- - do not require public key if private is given in ssh cloud-config
+ - do not require public key if private is given in ssh cloud-config
(LP: #648905)
# vi: syntax=text textwidth=79
diff --git a/README.md b/README.md
index feb896ac..45a5bd9e 100644
--- a/README.md
+++ b/README.md
@@ -39,11 +39,11 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| --- | --- |
-| Alpine Linux<br />Arch Linux<br />Container-Optimized OS<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />OpenMandriva<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX/MarinerOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />Huawei Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />Arch Linux<br />Container-Optimized OS<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />OpenCloudOS<br />OpenMandriva<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX/MarinerOS<br />SLES/openSUSE<br />TencentOS<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />Huawei Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
-Checkout the [contributing](https://cloudinit.readthedocs.io/en/latest/topics/contributing.html)
+Check out the [contributing](https://cloudinit.readthedocs.io/en/latest/development/contributing.html)
document that outlines the steps necessary to develop, test, and submit code.
## Daily builds
@@ -54,3 +54,7 @@ features or to verify bug fixes.
For Ubuntu, see the [Daily PPAs](https://code.launchpad.net/~cloud-init-dev/+archive/ubuntu/daily)
For CentOS, see the [COPR build repos](https://copr.fedorainfracloud.org/coprs/g/cloud-init/cloud-init-dev/)
+
+## Build / packaging
+
+To see reference build/packaging implementations, refer to [packages](packages).
diff --git a/SECURITY.md b/SECURITY.md
index 69360bb7..28639e06 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -1,4 +1,4 @@
-# Security Policy
+# Security policy
The following documents the upstream cloud-init security policy.
@@ -30,7 +30,7 @@ removal from the list for the company or individual involved.
If the reported bug is deemed a real security issue a CVE is assigned by
the Canonical Security Team as CVE Numbering Authority (CNA).
-If it is deemed a regular, non-security, issue, the reporter will be asked to
+If it is deemed a regular, non-security issue, the reporter will be asked to
follow typical bug reporting procedures.
In addition to the disclosure timeline, the core Canonical cloud-init team
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 65d3eece..5a61eac5 100755
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -12,6 +12,7 @@ import os
import sys
from cloudinit import settings
+from cloudinit.distros import uses_systemd
from cloudinit.stages import Init
from cloudinit.subp import ProcessExecutionError, runparts, subp
from cloudinit.util import (
@@ -20,6 +21,7 @@ from cloudinit.util import (
error,
get_config_logfiles,
is_link,
+ write_file,
)
ETC_MACHINE_ID = "/etc/machine-id"
@@ -55,8 +57,9 @@ def get_parser(parser=None):
action="store_true",
default=False,
help=(
- "Remove /etc/machine-id for golden image creation."
- " Next boot generates a new machine-id."
+ "Set /etc/machine-id to 'uninitialized\n' for golden image"
+ "creation. On next boot, systemd generates a new machine-id."
+ " Remove /etc/machine-id on non-systemd environments."
),
)
parser.add_argument(
@@ -120,7 +123,12 @@ def handle_clean_args(name, args):
"""Handle calls to 'cloud-init clean' as a subcommand."""
exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
if args.machine_id:
- del_file(ETC_MACHINE_ID)
+ if uses_systemd():
+ # Systemd v237 and later will create a new machine-id on next boot
+ write_file(ETC_MACHINE_ID, "uninitialized\n", mode=0o444)
+ else:
+ # Non-systemd like FreeBSD regen machine-id when file is absent
+ del_file(ETC_MACHINE_ID)
if exit_code == 0 and args.reboot:
cmd = ["shutdown", "-r", "now"]
try:
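
In short, the new --machine-id handling is: on a systemd host the file is rewritten with the literal sentinel 'uninitialized\n' (which systemd v237 and later replaces with a fresh machine-id on the next boot), while on non-systemd hosts the file is simply removed. A minimal standalone sketch of that decision, with a hypothetical reset_machine_id() helper that takes the systemd check as an argument:

    import os

    ETC_MACHINE_ID = "/etc/machine-id"


    def reset_machine_id(host_uses_systemd: bool) -> None:
        # Sketch of the branch above: systemd hosts get a sentinel file that
        # systemd v237+ replaces with a fresh machine-id on the next boot,
        # everything else simply loses the file.
        if host_uses_systemd:
            if os.path.exists(ETC_MACHINE_ID):
                os.chmod(ETC_MACHINE_ID, 0o644)  # make it writable first
            with open(ETC_MACHINE_ID, "w") as stream:
                stream.write("uninitialized\n")
            os.chmod(ETC_MACHINE_ID, 0o444)
        elif os.path.exists(ETC_MACHINE_ID):
            # Non-systemd hosts (e.g. FreeBSD) regenerate the id when the
            # file is absent.
            os.unlink(ETC_MACHINE_ID)
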
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 269d72cd..eee49860 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -19,8 +19,8 @@ from cloudinit.net import (
sysconfig,
)
from cloudinit.sources import DataSourceAzure as azure
-from cloudinit.sources import DataSourceOVF as ovf
from cloudinit.sources.helpers import openstack
+from cloudinit.sources.helpers.vmware.imc import guestcust_util
NAME = "net-convert"
@@ -130,8 +130,12 @@ def handle_args(name, args):
json.loads(net_data)["network"]
)
elif args.kind == "vmware-imc":
- config = ovf.Config(ovf.ConfigFile(args.network_data.name))
- pre_ns = ovf.get_network_config_from_conf(config, False)
+ config = guestcust_util.Config(
+ guestcust_util.ConfigFile(args.network_data.name)
+ )
+ pre_ns = guestcust_util.get_network_data_from_vmware_cust_cfg(
+ config, False
+ )
distro_cls = distros.fetch(args.distro)
distro = distro_cls(args.distro, {}, None)
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
index df136288..e1c37a78 100644
--- a/cloudinit/cmd/status.py
+++ b/cloudinit/cmd/status.py
@@ -13,11 +13,12 @@ import json
import os
import sys
from time import gmtime, sleep, strftime
-from typing import Any, Dict, List, NamedTuple, Tuple, Union
+from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
from cloudinit import safeyaml
from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.distros import uses_systemd
+from cloudinit.helpers import Paths
from cloudinit.util import get_cmdline, load_file, load_json
CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled"
@@ -63,7 +64,7 @@ class StatusDetails(NamedTuple):
description: str
errors: List[str]
last_update: str
- datasource: str
+ datasource: Optional[str]
TABULAR_LONG_TMPL = """\
@@ -124,7 +125,7 @@ def handle_status_args(name, args) -> int:
sys.stdout.flush()
details = get_status_details(paths)
sleep(0.25)
- details_dict: Dict[str, Union[str, List[str], Dict[str, Any]]] = {
+ details_dict: Dict[str, Union[None, str, List[str], Dict[str, Any]]] = {
"datasource": details.datasource,
"boot_status_code": details.boot_status_code.value,
"status": details.status.value,
@@ -195,7 +196,7 @@ def get_bootstatus(disable_file, paths) -> Tuple[UXAppBootStatusCode, str]:
return (bootstatus_code, reason)
-def get_status_details(paths=None) -> StatusDetails:
+def get_status_details(paths: Optional[Paths] = None) -> StatusDetails:
"""Return a dict with status, details and errors.
@param paths: An initialized cloudinit.helpers.paths object.
@@ -206,7 +207,7 @@ def get_status_details(paths=None) -> StatusDetails:
status = UXAppStatus.NOT_RUN
errors = []
- datasource = ""
+ datasource: Optional[str] = ""
status_v1 = {}
status_file = os.path.join(paths.run_dir, "status.json")
@@ -228,9 +229,14 @@ def get_status_details(paths=None) -> StatusDetails:
status = UXAppStatus.RUNNING
description = "Running in stage: {0}".format(value)
elif key == "datasource":
+ if value is None:
+ # If ds not yet written in status.json, then keep previous
+ # description
+ datasource = value
+ continue
description = value
- datasource, _, _ = value.partition(" ")
- datasource = datasource.lower().replace("datasource", "")
+ ds, _, _ = value.partition(" ")
+ datasource = ds.lower().replace("datasource", "")
elif isinstance(value, dict):
errors.extend(value.get("errors", []))
start = value.get("start") or 0
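
The datasource field in status.json may legitimately be null before a datasource has been detected, so the parsing now passes None through instead of calling .partition() on it; otherwise a value like "DataSourceEc2 [net,ver=2]" still normalises to "ec2". A sketch of that normalisation (normalize_datasource() is an illustrative name):

    def normalize_datasource(value):
        # status.json may not carry a datasource yet; keep None rather than
        # trying to parse it. "DataSourceEc2 [net,ver=2]" becomes "ec2".
        if value is None:
            return None
        name, _, _ = value.partition(" ")
        return name.lower().replace("datasource", "")
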
diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py
index d8fee517..876dbc6b 100644
--- a/cloudinit/config/cc_ansible.py
+++ b/cloudinit/config/cc_ansible.py
@@ -132,7 +132,7 @@ class AnsiblePullPip(AnsiblePull):
if not self.is_installed():
# bootstrap pip if required
try:
- import pip # type: ignore # noqa: F401
+ import pip # noqa: F401
except ImportError:
self.distro.install_packages(self.distro.pip_package_name)
cmd = [sys.executable, "-m", "pip", "install"]
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 302a67a4..169b0e18 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -8,45 +8,47 @@ import os
from logging import Logger
from textwrap import dedent
+from cloudinit import log as logging
from cloudinit import subp, util
from cloudinit.cloud import Cloud
from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
+LOG = logging.getLogger(__name__)
+
DEFAULT_CONFIG = {
- "ca_cert_path": "/usr/share/ca-certificates/",
- "ca_cert_filename": "cloud-init-ca-certs.crt",
+ "ca_cert_path": None,
+ "ca_cert_local_path": "/usr/local/share/ca-certificates/",
+ "ca_cert_filename": "cloud-init-ca-cert-{cert_index}.crt",
"ca_cert_config": "/etc/ca-certificates.conf",
- "ca_cert_system_path": "/etc/ssl/certs/",
"ca_cert_update_cmd": ["update-ca-certificates"],
}
DISTRO_OVERRIDES = {
"rhel": {
- "ca_cert_path": "/usr/share/pki/ca-trust-source/",
- "ca_cert_filename": "anchors/cloud-init-ca-certs.crt",
+ "ca_cert_path": "/etc/pki/ca-trust/",
+ "ca_cert_local_path": "/usr/share/pki/ca-trust-source/",
+ "ca_cert_filename": "anchors/cloud-init-ca-cert-{cert_index}.crt",
"ca_cert_config": None,
- "ca_cert_system_path": "/etc/pki/ca-trust/",
"ca_cert_update_cmd": ["update-ca-trust"],
- }
+ },
}
MODULE_DESCRIPTION = """\
-This module adds CA certificates to ``/etc/ca-certificates.conf`` and updates
-the ssl cert cache using ``update-ca-certificates``. The default certificates
-can be removed from the system with the configuration option
-``remove_defaults``.
+This module adds CA certificates to the system's CA store and updates any
+related files using the appropriate OS-specific utility. The default CA
+certificates can be disabled/deleted from use by the system with the
+configuration option ``remove_defaults``.
.. note::
certificates must be specified using valid yaml. in order to specify a
multiline certificate, the yaml multiline list syntax must be used
.. note::
- For Alpine Linux the "remove_defaults" functionality works if the
- ca-certificates package is installed but not if the
- ca-certificates-bundle package is installed.
+ Alpine Linux requires the ca-certificates package to be installed in
+ order to provide the ``update-ca-certificates`` command.
"""
-distros = ["alpine", "debian", "ubuntu", "rhel"]
+distros = ["alpine", "debian", "rhel", "ubuntu"]
meta: MetaSchema = {
"id": "cc_ca_certs",
@@ -79,11 +81,11 @@ def _distro_ca_certs_configs(distro_name):
"""Return a distro-specific ca_certs config dictionary
@param distro_name: String providing the distro class name.
- @returns: Dict of distro configurations for ca-cert.
+ @returns: Dict of distro configurations for ca_cert.
"""
cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG)
cfg["ca_cert_full_path"] = os.path.join(
- cfg["ca_cert_path"], cfg["ca_cert_filename"]
+ cfg["ca_cert_local_path"], cfg["ca_cert_filename"]
)
return cfg
@@ -100,124 +102,145 @@ def update_ca_certs(distro_cfg):
def add_ca_certs(distro_cfg, certs):
"""
Adds certificates to the system. To actually apply the new certificates
- you must also call L{update_ca_certs}.
+ you must also call the appropriate distro-specific utility such as
+ L{update_ca_certs}.
@param distro_cfg: A hash providing _distro_ca_certs_configs function.
@param certs: A list of certificate strings.
"""
if not certs:
return
- # First ensure they are strings...
- cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(
- distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644
- )
- update_cert_config(distro_cfg)
+ # Write each certificate to a separate file.
+ for cert_index, c in enumerate(certs, 1):
+ # First ensure they are strings...
+ cert_file_contents = str(c)
+ cert_file_name = distro_cfg["ca_cert_full_path"].format(
+ cert_index=cert_index
+ )
+ util.write_file(cert_file_name, cert_file_contents, mode=0o644)
-def update_cert_config(distro_cfg):
+def disable_default_ca_certs(distro_name, distro_cfg):
"""
- Update Certificate config file to add the file path managed cloud-init
+ Disables all default trusted CA certificates. For Alpine, Debian and
+ Ubuntu to actually apply the changes you must also call
+ L{update_ca_certs}.
+
+ @param distro_name: String providing the distro class name.
+ @param distro_cfg: A hash providing _distro_ca_certs_configs function.
+ """
+ if distro_name == "rhel":
+ remove_default_ca_certs(distro_cfg)
+ elif distro_name in ["alpine", "debian", "ubuntu"]:
+ disable_system_ca_certs(distro_cfg)
+
+ if distro_name in ["debian", "ubuntu"]:
+ debconf_sel = (
+ "ca-certificates ca-certificates/trust_new_crts " + "select no"
+ )
+ subp.subp(("debconf-set-selections", "-"), debconf_sel)
+
+
+def disable_system_ca_certs(distro_cfg):
+ """
+ For every entry in the CA_CERT_CONFIG file prefix the entry with a "!"
+ in order to disable it.
@param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
if distro_cfg["ca_cert_config"] is None:
return
- if os.stat(distro_cfg["ca_cert_config"]).st_size == 0:
- # If the CA_CERT_CONFIG file is empty (i.e. all existing
- # CA certs have been deleted) then simply output a single
- # line with the cloud-init cert filename.
- out = "%s\n" % distro_cfg["ca_cert_filename"]
- else:
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # causes subsequent entries to be ignored. (LP: #1077020)
+ header_comment = (
+ "# Modified by cloud-init to deselect certs due to user-data"
+ )
+ added_header = False
+ if os.stat(distro_cfg["ca_cert_config"]).st_size != 0:
orig = util.load_file(distro_cfg["ca_cert_config"])
- cr_cont = "\n".join(
- [
- line
- for line in orig.splitlines()
- if line != distro_cfg["ca_cert_filename"]
- ]
- )
- out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"])
- util.write_file(distro_cfg["ca_cert_config"], out, omode="wb")
+ out_lines = []
+ for line in orig.splitlines():
+ if line == header_comment:
+ added_header = True
+ out_lines.append(line)
+ elif line == "" or line[0] in ("#", "!"):
+ out_lines.append(line)
+ else:
+ if not added_header:
+ out_lines.append(header_comment)
+ added_header = True
+ out_lines.append("!" + line)
+ util.write_file(
+ distro_cfg["ca_cert_config"], "\n".join(out_lines) + "\n", omode="wb"
+ )
-def remove_default_ca_certs(distro_name, distro_cfg):
+def remove_default_ca_certs(distro_cfg):
"""
- Removes all default trusted CA certificates from the system. To actually
- apply the change you must also call L{update_ca_certs}.
+ Removes all default trusted CA certificates from the system.
- @param distro_name: String providing the distro class name.
@param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
- util.delete_dir_contents(distro_cfg["ca_cert_path"])
- util.delete_dir_contents(distro_cfg["ca_cert_system_path"])
- util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644)
+ if distro_cfg["ca_cert_path"] is None:
+ return
- if distro_name in ["debian", "ubuntu"]:
- debconf_sel = (
- "ca-certificates ca-certificates/trust_new_crts " + "select no"
- )
- subp.subp(("debconf-set-selections", "-"), debconf_sel)
+ LOG.debug("Deleting system CA certificates")
+ util.delete_dir_contents(distro_cfg["ca_cert_path"])
+ util.delete_dir_contents(distro_cfg["ca_cert_local_path"])
def handle(
name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
) -> None:
"""
- Call to handle ca-cert sections in cloud-config file.
+ Call to handle ca_cert sections in cloud-config file.
- @param name: The module name "ca-cert" from cloud.cfg
+ @param name: The module name "ca_cert" from cloud.cfg
@param cfg: A nested dict containing the entire cloud config contents.
@param cloud: The L{CloudInit} object in use.
@param log: Pre-initialized Python logger object to use for logging.
@param args: Any module arguments from cloud.cfg
"""
if "ca-certs" in cfg:
- log.warning(
+ LOG.warning(
"DEPRECATION: key 'ca-certs' is now deprecated. Use 'ca_certs'"
" instead."
)
elif "ca_certs" not in cfg:
- log.debug(
+ LOG.debug(
"Skipping module named %s, no 'ca_certs' key in configuration",
name,
)
return
if "ca-certs" in cfg and "ca_certs" in cfg:
- log.warning(
+ LOG.warning(
"Found both ca-certs (deprecated) and ca_certs config keys."
" Ignoring ca-certs."
)
ca_cert_cfg = cfg.get("ca_certs", cfg.get("ca-certs"))
distro_cfg = _distro_ca_certs_configs(cloud.distro.name)
- # If there is a remove_defaults option set to true, remove the system
+ # If there is a remove_defaults option set to true, disable the system
# default trusted CA certs first.
if "remove-defaults" in ca_cert_cfg:
- log.warning(
+ LOG.warning(
"DEPRECATION: key 'ca-certs.remove-defaults' is now deprecated."
" Use 'ca_certs.remove_defaults' instead."
)
- if ca_cert_cfg.get("remove-defaults", False):
- log.debug("Removing default certificates")
- remove_default_ca_certs(cloud.distro.name, distro_cfg)
- elif ca_cert_cfg.get("remove_defaults", False):
- log.debug("Removing default certificates")
- remove_default_ca_certs(cloud.distro.name, distro_cfg)
+ if ca_cert_cfg.get(
+ "remove_defaults", ca_cert_cfg.get("remove-defaults", False)
+ ):
+ LOG.debug("Disabling/removing default certificates")
+ disable_default_ca_certs(cloud.distro.name, distro_cfg)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
if trusted_certs:
- log.debug("Adding %d certificates" % len(trusted_certs))
+ LOG.debug("Adding %d certificates", len(trusted_certs))
add_ca_certs(distro_cfg, trusted_certs)
# Update the system with the new cert configuration.
- log.debug("Updating certificates")
+ LOG.debug("Updating certificates")
update_ca_certs(distro_cfg)
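
The reworked module writes each trusted certificate to its own numbered file (via the ca_cert_filename template) and, on Debian-family systems, disables rather than deletes the stock certificates by prefixing active entries in /etc/ca-certificates.conf with "!". A small sketch of both pieces, assuming the Debian-family defaults from DEFAULT_CONFIG above; cert_file_names() and deselect_entries() are illustrative names only:

    import os

    # Debian-family defaults mirroring DEFAULT_CONFIG above.
    CA_CERT_LOCAL_PATH = "/usr/local/share/ca-certificates/"
    CA_CERT_FILENAME = "cloud-init-ca-cert-{cert_index}.crt"


    def cert_file_names(certs):
        # One file per certificate, numbered from 1:
        # .../cloud-init-ca-cert-1.crt, -2.crt, ...
        template = os.path.join(CA_CERT_LOCAL_PATH, CA_CERT_FILENAME)
        return [template.format(cert_index=i) for i, _ in enumerate(certs, 1)]


    def deselect_entries(lines):
        # Prefix still-active entries with "!" so update-ca-certificates
        # skips them; blanks, comments and already-disabled entries pass
        # through unchanged.
        return [
            line if not line or line[0] in ("#", "!") else "!" + line
            for line in lines
        ]
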
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 71d52d3d..4aae5530 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -80,6 +80,10 @@ meta: MetaSchema = {
table_type: gpt
layout: [[100, 82]]
overwrite: true
+ /dev/sdd:
+ table_type: mbr
+ layout: true
+ overwrite: true
fs_setup:
- label: fs1
filesystem: ext4
@@ -91,10 +95,14 @@ meta: MetaSchema = {
- label: swap
device: swap_disk.1
filesystem: swap
+ - label: fs3
+ device: /dev/sdd1
+ filesystem: ext4
mounts:
- ["my_alias.1", "/mnt1"]
- ["my_alias.2", "/mnt2"]
- ["swap_disk.1", "none", "swap", "sw", "0", "0"]
+ - ["/dev/sdd1", "/mnt3"]
"""
)
],
@@ -431,33 +439,6 @@ def is_disk_used(device):
return False
-def get_dyn_func(*args):
- """
- Call the appropriate function.
-
- The first value is the template for function name
- The second value is the template replacement
- The remain values are passed to the function
-
- For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
- would call "foo_bar" with args of 1, 2, 3
- """
- if len(args) < 2:
- raise Exception("Unable to determine dynamic funcation name")
-
- func_name = args[0] % args[1]
- func_args = args[2:]
-
- try:
- if func_args:
- return globals()[func_name](*func_args)
- else:
- return globals()[func_name]
-
- except KeyError as e:
- raise Exception("No such function %s to call!" % func_name) from e
-
-
def get_hdd_size(device):
try:
size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device])
@@ -556,9 +537,12 @@ def check_partition_layout(table_type, device, layout):
to add support for other disk layout schemes, add a
function called check_partition_%s_layout
"""
- found_layout = get_dyn_func(
- "check_partition_%s_layout", table_type, device, layout
- )
+ if "gpt" == table_type:
+ found_layout = check_partition_gpt_layout(device, layout)
+ elif "mbr" == table_type:
+ found_layout = check_partition_mbr_layout(device, layout)
+ else:
+ raise Exception("Unable to determine table type")
LOG.debug(
"called check_partition_%s_layout(%s, %s), returned: %s",
@@ -605,8 +589,8 @@ def get_partition_mbr_layout(size, layout):
"""
if not isinstance(layout, list) and isinstance(layout, bool):
- # Create a single partition
- return "0,"
+ # Create a single partition, default to Linux
+ return ",,83"
if (len(layout) == 0 and isinstance(layout, list)) or not isinstance(
layout, list
@@ -673,7 +657,7 @@ def get_partition_gpt_layout(size, layout):
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
- null = "\0"
+ null = b"\0"
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
@@ -714,7 +698,11 @@ def get_partition_layout(table_type, size, layout):
other layouts, simply add a "get_partition_%s_layout"
function.
"""
- return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
+ if "mbr" == table_type:
+ return get_partition_mbr_layout(size, layout)
+ elif "gpt" == table_type:
+ return get_partition_gpt_layout(size, layout)
+ raise Exception("Unable to determine table type")
def read_parttbl(device):
@@ -741,7 +729,7 @@ def exec_mkpart_mbr(device, layout):
types, i.e. gpt
"""
# Create the partitions
- prt_cmd = [SFDISK_CMD, "--Linux", "--unit=S", "--force", device]
+ prt_cmd = [SFDISK_CMD, "--force", device]
try:
subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
@@ -779,19 +767,6 @@ def exec_mkpart_gpt(device, layout):
read_parttbl(device)
-def exec_mkpart(table_type, device, layout):
- """
- Fetches the function for creating the table type.
- This allows to dynamically find which function to call.
-
- Paramaters:
- table_type: type of partition table to use
- device: the device to work on
- layout: layout definition specific to partition table
- """
- return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
-
-
def assert_and_settle_device(device):
"""Assert that device exists and settle so it is fully recognized."""
if not os.path.exists(device):
@@ -869,7 +844,12 @@ def mkpart(device, definition):
LOG.debug(" Layout is: %s", part_definition)
LOG.debug("Creating partition table on %s", device)
- exec_mkpart(table_type, device, part_definition)
+ if "mbr" == table_type:
+ exec_mkpart_mbr(device, part_definition)
+ elif "gpt" == table_type:
+ exec_mkpart_gpt(device, part_definition)
+ else:
+ raise Exception("Unable to determine table type")
LOG.debug("Partition table created for %s", device)
@@ -968,7 +948,7 @@ def mkfs(fs_cfg):
odevice = device
LOG.debug("Identifying device to create %s filesytem on", label)
- # any mean pick the first match on the device with matching fs_type
+ # 'any' means pick the first match on the device with matching fs_type
label_match = True
if partition.lower() == "any":
label_match = False
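
For 'layout: true' the module now hands sfdisk the input ",,83" (default start, remainder of the disk, partition type 0x83 = Linux) and invokes sfdisk without the dropped --Linux/--unit=S options. A sketch of the resulting invocation, using a hypothetical /dev/sdd purely for illustration; the actual call is left commented out because it would rewrite the partition table:

    # Hypothetical device, used purely for illustration.
    device = "/dev/sdd"

    # sfdisk input emitted for 'layout: true': default start, rest of the
    # disk, partition type 0x83 (Linux).
    sfdisk_input = ",,83"

    # sfdisk is now invoked without the removed --Linux/--unit=S options.
    sfdisk_cmd = ["sfdisk", "--force", device]

    # subp.subp(sfdisk_cmd, data=sfdisk_input + "\n") would then create the
    # single Linux partition requested by 'layout: true'.
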
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 8ecc4eb8..b5620f37 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -37,12 +37,19 @@ distros = [
"miraclelinux",
"openbsd",
"openEuler",
+ "OpenCloudOS",
"openmandriva",
"opensuse",
+ "opensuse-microos",
+ "opensuse-tumbleweed",
+ "opensuse-leap",
"photon",
"rhel",
"rocky",
+ "sle_hpc",
+ "sle-micro",
"sles",
+ "TencentOS",
"ubuntu",
"virtuozzo",
]
@@ -215,6 +222,11 @@ DISTRO_CLIENT_CONFIG = {
},
}
+for distro in ("opensuse-microos", "opensuse-tumbleweed", "opensuse-leap"):
+ DISTRO_CLIENT_CONFIG[distro] = DISTRO_CLIENT_CONFIG["opensuse"]
+
+for distro in ("sle_hpc", "sle-micro"):
+ DISTRO_CLIENT_CONFIG[distro] = DISTRO_CLIENT_CONFIG["sles"]
# The schema definition for each cloud-config module is a strict contract for
# describing supported configuration parameters for each cloud-config section.
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index b8a9fe17..38c2cc99 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -25,6 +25,7 @@ from cloudinit.settings import PER_INSTANCE
AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
+PUPPET_PACKAGE_NAMES = ("puppet-agent", "puppet")
MODULE_DESCRIPTION = """\
This module handles puppet installation and configuration. If the ``puppet``
@@ -118,26 +119,21 @@ class PuppetConstants:
self.csr_attributes_path = csr_attributes_path
-def _autostart_puppet(log):
- # Set puppet to automatically start
- if os.path.exists("/etc/default/puppet"):
- subp.subp(
- [
- "sed",
- "-i",
- "-e",
- "s/^START=.*/START=yes/",
- "/etc/default/puppet",
- ],
- capture=False,
- )
- elif subp.which("systemctl"):
- subp.subp(["systemctl", "enable", "puppet.service"], capture=False)
- elif os.path.exists("/sbin/chkconfig"):
- subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False)
- else:
+def _manage_puppet_services(log, cloud: Cloud, action: str):
+ """Attempts to perform action on one of the puppet services"""
+ service_managed: str = ""
+ for puppet_name in PUPPET_PACKAGE_NAMES:
+ try:
+ cloud.distro.manage_service(action, f"{puppet_name}.service")
+ service_managed = puppet_name
+ break
+ except subp.ProcessExecutionError:
+ pass
+ if not service_managed:
log.warning(
- "Sorry we do not know how to enable puppet services on this system"
+ "Could not '%s' any of the following services: %s",
+ action,
+ ", ".join(PUPPET_PACKAGE_NAMES),
)
@@ -221,7 +217,7 @@ def handle(
else: # default to 'packages'
puppet_user = "puppet"
puppet_bin = "puppet"
- puppet_package = "puppet"
+ puppet_package = None # changes with distro
package_name = util.get_cfg_option_str(
puppet_cfg, "package_name", puppet_package
@@ -238,7 +234,22 @@ def handle(
)
if install_type == "packages":
- cloud.distro.install_packages((package_name, version))
+        if package_name is None:  # conf has no package_name
+ for puppet_name in PUPPET_PACKAGE_NAMES:
+ try:
+ cloud.distro.install_packages((puppet_name, version))
+ package_name = puppet_name
+ break
+ except subp.ProcessExecutionError:
+ pass
+ if not package_name:
+ log.warning(
+ "No installable puppet package in any of: %s",
+ ", ".join(PUPPET_PACKAGE_NAMES),
+ )
+ else:
+ cloud.distro.install_packages((package_name, version))
+
elif install_type == "aio":
install_puppet_aio(
cloud.distro, aio_install_url, version, collection, cleanup
@@ -316,9 +327,9 @@ def handle(
yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False),
)
- # Set it up so it autostarts
if start_puppetd:
- _autostart_puppet(log)
+ # Enables the services
+ _manage_puppet_services(log, cloud, "enable")
# Run the agent if needed
if run:
@@ -344,7 +355,7 @@ def handle(
if start_puppetd:
# Start puppetd
- subp.subp(["service", "puppet", "start"], capture=False)
+ _manage_puppet_services(log, cloud, "start")
# vi: ts=4 expandtab
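
Package installation now probes the candidate names in PUPPET_PACKAGE_NAMES ("puppet-agent" first, then "puppet") and settles on whichever installs cleanly, warning if neither does; service enable/start follows the same try-each pattern. A sketch of that fallback, where install_first_available() is an illustrative name and install_packages stands in for the distro's installer, which is expected to raise on failure:

    PUPPET_PACKAGE_NAMES = ("puppet-agent", "puppet")


    def install_first_available(install_packages, version=None):
        # Try each candidate package name in order and report which one
        # stuck; None means every attempt raised.
        for name in PUPPET_PACKAGE_NAMES:
            try:
                install_packages((name, version))
                return name
            except Exception:  # stand-in for subp.ProcessExecutionError
                continue
        return None
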
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 7a0ecf96..0e6197a2 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -60,15 +60,28 @@ def _resize_btrfs(mount_point, devpth):
if not util.mount_is_read_write(mount_point) and os.path.isdir(
"%s/.snapshots" % mount_point
):
- return (
+ cmd = [
"btrfs",
"filesystem",
"resize",
"max",
"%s/.snapshots" % mount_point,
- )
+ ]
else:
- return ("btrfs", "filesystem", "resize", "max", mount_point)
+ cmd = ["btrfs", "filesystem", "resize", "max", mount_point]
+
+ # btrfs has exclusive operations and resize may fail if btrfs is busy
+ # doing one of the operations that prevents resize. As of btrfs 5.10
+ # the resize operation can be queued
+ btrfs_with_queue = util.Version.from_str("5.10")
+ system_btrfs_ver = util.Version.from_str(
+ subp.subp(["btrfs", "--version"])[0].split("v")[-1].strip()
+ )
+ if system_btrfs_ver >= btrfs_with_queue:
+ idx = cmd.index("resize")
+ cmd.insert(idx + 1, "--enqueue")
+
+ return tuple(cmd)
def _resize_ext(mount_point, devpth):
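
The btrfs resize command now gains "--enqueue" whenever the installed btrfs-progs is 5.10 or newer, so a resize issued while another exclusive btrfs operation is running is queued instead of failing. A sketch of the version gate, assuming the version string (e.g. "5.16.2", major.minor at least) has already been pulled out of 'btrfs --version'; btrfs_resize_cmd() is an illustrative name:

    def btrfs_resize_cmd(mount_point, btrfs_version):
        # Queue the resize on btrfs-progs >= 5.10 so it is deferred rather
        # than failing while another exclusive operation is in progress.
        cmd = ["btrfs", "filesystem", "resize", "max", mount_point]
        major, minor = (int(part) for part in btrfs_version.split(".")[:2])
        if (major, minor) >= (5, 10):
            cmd.insert(cmd.index("resize") + 1, "--enqueue")
        return tuple(cmd)


    # btrfs_resize_cmd("/", "5.16.2")
    # -> ('btrfs', 'filesystem', 'resize', '--enqueue', 'max', '/')
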
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 8dbed71e..4629ca7d 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -26,17 +26,23 @@ RESOLVE_CONFIG_TEMPLATE_MAP = {
}
MODULE_DESCRIPTION = """\
+Unless manually editing :file:`/etc/resolv.conf` is the correct way to manage
+nameserver information on your operating system, you do not want to use
+this module. Many distros have moved away from manually editing ``resolv.conf``
+so please verify that this is the preferred nameserver management method for
+your distro before using this module.
+
+Note that using :ref:`network_config` is preferred, rather than using this
+module, when possible.
+
This module is intended to manage resolv.conf in environments where early
configuration of resolv.conf is necessary for further bootstrapping and/or
where configuration management such as puppet or chef own DNS configuration.
-As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat
-will use sysconfig, this module is likely to be of little use unless those
-are configured correctly.
When using a :ref:`datasource_config_drive` and a RHEL-like system,
resolv.conf will also be managed automatically due to the available
information provided for DNS servers in the :ref:`network_config_v2` format.
-For those that with to have different settings, use this module.
+For those that wish to have different settings, use this module.
In order for the ``resolv_conf`` section to be applied, ``manage_resolv_conf``
must be set ``true``.
@@ -44,10 +50,6 @@ must be set ``true``.
.. note::
For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP
enabled NICs.
-
-.. note::
- And, in Ubuntu/Debian it is recommended that DNS be configured via the
- standard /etc/network/interfaces configuration file.
"""
meta: MetaSchema = {
@@ -60,8 +62,13 @@ meta: MetaSchema = {
"fedora",
"mariner",
"opensuse",
+ "opensuse-leap",
+ "opensuse-microos",
+ "opensuse-tumbleweed",
"photon",
"rhel",
+ "sle_hpc",
+ "sle-micro",
"sles",
],
"frequency": PER_INSTANCE,
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 464198c4..27c0429b 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -38,7 +38,7 @@ how it is executed:
Note that the ``runcmd`` module only writes the script to be run
later. The module that actually runs the script is ``scripts-user``
-in the :ref:`topics/boot:Final` boot stage.
+in the :ref:`Final<boot-Final>` boot stage.
.. note::
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index c0bda6fe..fa5c023c 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -107,7 +107,7 @@ def handle(
# distro._read_hostname implementation so we only validate one artifact.
prev_fn = os.path.join(cloud.get_cpath("data"), "set-hostname")
prev_hostname = {}
- if os.path.exists(prev_fn):
+ if os.path.exists(prev_fn) and os.stat(prev_fn).st_size > 0:
prev_hostname = util.load_json(util.load_file(prev_fn))
hostname_changed = hostname != prev_hostname.get(
"hostname"
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 539887c5..3a0b3f5b 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -108,6 +108,18 @@ def get_users_by_type(users_list: list, pw_type: str) -> list:
)
+def _restart_ssh_daemon(distro, service):
+ try:
+ distro.manage_service("restart", service)
+ LOG.debug("Restarted the SSH daemon.")
+ except subp.ProcessExecutionError as e:
+ LOG.warning(
+ "'ssh_pwauth' configuration may not be applied. Cloud-init was "
+ "unable to restart SSH daemon due to error: '%s'",
+ e,
+ )
+
+
def handle_ssh_pwauth(pw_auth, distro: Distro):
"""Apply sshd PasswordAuthentication changes.
@@ -117,47 +129,6 @@ def handle_ssh_pwauth(pw_auth, distro: Distro):
@return: None"""
service = distro.get_option("ssh_svcname", "ssh")
- restart_ssh = True
- try:
- distro.manage_service("status", service)
- except subp.ProcessExecutionError as e:
- uses_systemd = distro.uses_systemd()
- if not uses_systemd:
- LOG.debug(
- "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
- " will not be restarted because it is not running or not"
- " available.",
- pw_auth,
- service,
- )
- restart_ssh = False
- elif e.exit_code == 3:
- # Service is not running. Write ssh config.
- LOG.debug(
- "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
- " will not be restarted because it is stopped.",
- pw_auth,
- service,
- )
- restart_ssh = False
- elif e.exit_code == 4:
- # Service status is unknown
- LOG.warning(
- "Ignoring config 'ssh_pwauth: %s'."
- " SSH service '%s' is not installed.",
- pw_auth,
- service,
- )
- return
- else:
- LOG.warning(
- "Ignoring config 'ssh_pwauth: %s'."
- " SSH service '%s' is not available. Error: %s.",
- pw_auth,
- service,
- e,
- )
- return
cfg_name = "PasswordAuthentication"
@@ -184,11 +155,21 @@ def handle_ssh_pwauth(pw_auth, distro: Distro):
LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
- if restart_ssh:
- distro.manage_service("restart", service)
- LOG.debug("Restarted the SSH daemon.")
+ if distro.uses_systemd():
+ state = subp.subp(
+ [
+ "systemctl",
+ "show",
+ "--property",
+ "ActiveState",
+ "--value",
+ service,
+ ]
+ ).stdout.strip()
+ if state.lower() in ["active", "activating", "reloading"]:
+ _restart_ssh_daemon(distro, service)
else:
- LOG.debug("Not restarting SSH service: service is stopped.")
+ _restart_ssh_daemon(distro, service)
def handle(
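
With the status probing removed, the module restarts sshd unconditionally on non-systemd hosts and, on systemd hosts, only when 'systemctl show --property ActiveState --value <unit>' reports active, activating or reloading; a failed restart is logged as a warning instead of raised. A sketch of that ActiveState check (sshd_is_active() is an illustrative name; it assumes systemctl is on PATH):

    import subprocess


    def sshd_is_active(service="ssh"):
        # Ask systemd for the unit's ActiveState; only active/activating/
        # reloading units are worth restarting after a sshd_config change.
        state = subprocess.run(
            ["systemctl", "show", "--property", "ActiveState",
             "--value", service],
            capture_output=True,
            text=True,
            check=False,
        ).stdout.strip()
        return state.lower() in ("active", "activating", "reloading")
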
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index c9e59d16..1ec889f3 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -187,8 +187,8 @@ for k in GENERATE_KEY_NAMES:
CONFIG_KEY_TO_FILE.update(
{
f"{k}_private": (KEY_FILE_TPL % k, 0o600),
- f"{k}_public": (f"{KEY_FILE_TPL % k}.pub", 0o600),
- f"{k}_certificate": (f"{KEY_FILE_TPL % k}-cert.pub", 0o600),
+ f"{k}_public": (f"{KEY_FILE_TPL % k}.pub", 0o644),
+ f"{k}_certificate": (f"{KEY_FILE_TPL % k}-cert.pub", 0o644),
}
)
PRIV_TO_PUB[f"{k}_private"] = f"{k}_public"
@@ -211,6 +211,7 @@ def handle(
if "ssh_keys" in cfg:
# if there are keys and/or certificates in cloud-config, use them
+ cert_config = []
for (key, val) in cfg["ssh_keys"].items():
if key not in CONFIG_KEY_TO_FILE:
if pattern_unsupported_config_keys.match(key):
@@ -224,8 +225,10 @@ def handle(
util.write_file(tgt_fn, val, tgt_perms)
# set server to present the most recently identified certificate
if "_certificate" in key:
- cert_config = {"HostCertificate": tgt_fn}
- ssh_util.update_ssh_config(cert_config)
+ cert_config.append(("HostCertificate", str(tgt_fn)))
+
+ if cert_config:
+ ssh_util.append_ssh_config(cert_config)
for private_type, public_type in PRIV_TO_PUB.items():
if (
diff --git a/cloudinit/config/cc_wireguard.py b/cloudinit/config/cc_wireguard.py
index 850c5a4f..732440f0 100644
--- a/cloudinit/config/cc_wireguard.py
+++ b/cloudinit/config/cc_wireguard.py
@@ -163,7 +163,7 @@ def enable_wg(wg_int: dict, cloud: Cloud):
LOG.debug("Enabling wg-quick@%s at boot", wg_int["name"])
cloud.distro.manage_service("enable", f'wg-quick@{wg_int["name"]}')
LOG.debug("Bringing up interface wg-quick@%s", wg_int["name"])
- cloud.distro.manage_service("start", f'wg-quick@{wg_int["name"]}')
+ cloud.distro.manage_service("restart", f'wg-quick@{wg_int["name"]}')
except subp.ProcessExecutionError as e:
raise RuntimeError(
f"Failed enabling/starting Wireguard interface(s):{NL}{str(e)}"
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index f7d89935..a517d044 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -37,15 +37,18 @@ meta: MetaSchema = {
before being written. For empty file creation, content can be omitted.
.. note::
- if multiline data is provided, care should be taken to ensure that it
- follows yaml formatting standards. to specify binary data, use the yaml
+ If multiline data is provided, care should be taken to ensure that it
+ follows yaml formatting standards. To specify binary data, use the yaml
option ``!!binary``
.. note::
Do not write files under /tmp during boot because of a race with
systemd-tmpfiles-clean that can cause temp files to get cleaned during
the early boot process. Use /run/somedir instead to avoid race
- LP:1707222."""
+ LP:1707222.
+
+ .. warning::
+        Existing files will be overwritten."""
),
"distros": ["all"],
"examples": [
@@ -179,7 +182,9 @@ def write_files(name, files, owner: str):
(u, g) = util.extract_usergroup(f_info.get("owner", owner))
perms = decode_perms(f_info.get("permissions"), DEFAULT_PERMS)
omode = "ab" if util.get_cfg_option_bool(f_info, "append") else "wb"
- util.write_file(path, contents, omode=omode, mode=perms)
+ util.write_file(
+ path, contents, omode=omode, mode=perms, user=u, group=g
+ )
util.chownbyname(path, u, g)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index cf81b844..3087b22c 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -33,10 +33,12 @@ distros = [
"fedora",
"mariner",
"openEuler",
+ "OpenCloudOS",
"openmandriva",
"photon",
"rhel",
"rocky",
+ "TencentOS",
"virtuozzo",
]
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index 64e50577..958e4f94 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -18,7 +18,16 @@ from cloudinit.config import Config
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_ALWAYS
-distros = ["opensuse", "sles"]
+distros = [
+ "opensuse",
+ "opensuse-microos",
+ "opensuse-tumbleweed",
+ "opensuse-leap",
+ "sle_hpc",
+ "sle-micro",
+ "sles",
+]
+
MODULE_DESCRIPTION = """\
Zypper behavior can be configured using the ``config`` key, which will modify
``/etc/zypp/zypp.conf``. The configuration writer will only append the
@@ -31,7 +40,8 @@ options will be resolved by the way the zypp.conf INI file is parsed.
The ``repos`` key may be used to add repositories to the system. Beyond the
required ``id`` and ``baseurl`` attributions, no validation is performed
on the ``repos`` entries. It is assumed the user is familiar with the
-zypper repository file format.
+zypper repository file format. This configuration is also applicable for
+systems with transactional-updates.
"""
meta: MetaSchema = {
"id": "cc_zypper_add_repo",
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 42792985..b0b5fccf 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,6 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
-
import argparse
import json
import logging
@@ -29,7 +28,6 @@ except ImportError:
ValidationError = Exception # type: ignore
-error = partial(error, sys_exit=True)
LOG = logging.getLogger(__name__)
VERSIONED_USERDATA_SCHEMA_FILE = "versions.schema.cloud-config.json"
@@ -164,17 +162,47 @@ def is_schema_byte_string(checker, instance):
) or isinstance(instance, (bytes,))
-def _add_deprecation_msg(description: Optional[str] = None) -> str:
- if description:
- return f"{DEPRECATED_PREFIX}{description}"
- return DEPRECATED_PREFIX.replace(":", ".").strip()
+def _add_deprecated_changed_or_new_msg(
+ config: dict, annotate=False, filter_key=None
+) -> str:
+ """combine description with new/changed/deprecated message
+
+ deprecated/changed/new keys require a _version key (this is verified
+ in a unittest), a _description key is optional
+ """
+
+ def format_message(key: str):
+ if not config.get(f"{key}"):
+ return ""
+ key_description = config.get(f"{key}_description", "")
+ v = config.get(
+ f"{key}_version",
+ f"<missing {key}_version key, please file a bug report>",
+ )
+ msg = f"{key.capitalize()} in version {v}. {key_description}"
+ if annotate:
+ return f" {msg}"
+        # italicised RST - no whitespace between asterisk and text
+ return f"\n\n*{msg.strip()}*"
-def _validator_deprecated(
+ # define print order
+ filter_keys = (
+ filter_key if filter_key else ["deprecated", "changed", "new"]
+ )
+
+ # build a deprecation/new/changed string
+ changed_new_deprecated = "".join(map(format_message, filter_keys))
+ description = config.get("description", "")
+ return f"{description}{changed_new_deprecated}".rstrip()
+
+
+def _validator(
_validator,
deprecated: bool,
_instance,
schema: dict,
+ filter_key: str,
error_type: Type[Exception] = SchemaDeprecationError,
):
"""Jsonschema validator for `deprecated` items.
@@ -183,11 +211,16 @@ def _validator_deprecated(
otherwise the instance is consider faulty.
"""
if deprecated:
- description = schema.get("description")
- msg = _add_deprecation_msg(description)
+ msg = _add_deprecated_changed_or_new_msg(
+ schema, annotate=True, filter_key=[filter_key]
+ )
yield error_type(msg)
+_validator_deprecated = partial(_validator, filter_key="deprecated")
+_validator_changed = partial(_validator, filter_key="changed")
+
+
def _anyOf(
validator,
anyOf,
@@ -316,6 +349,7 @@ def get_jsonschema_validator():
# Add deprecation handling
validators = dict(Draft4Validator.VALIDATORS)
validators[DEPRECATED_KEY] = _validator_deprecated
+ validators["changed"] = _validator_changed
validators["oneOf"] = _oneOf
validators["anyOf"] = _anyOf
@@ -382,7 +416,7 @@ def validate_cloudconfig_metaschema(validator, schema: dict, throw=True):
def validate_cloudconfig_schema(
config: dict,
- schema: dict = None,
+ schema: Optional[dict] = None,
strict: bool = False,
strict_metaschema: bool = False,
log_details: bool = True,
@@ -424,10 +458,14 @@ def validate_cloudconfig_schema(
errors: SchemaProblems = []
deprecations: SchemaProblems = []
- for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
- path = ".".join([str(p) for p in error.path])
- problem = (SchemaProblem(path, error.message),)
- if isinstance(error, SchemaDeprecationError): # pylint: disable=W1116
+ for schema_error in sorted(
+ validator.iter_errors(config), key=lambda e: e.path
+ ):
+ path = ".".join([str(p) for p in schema_error.path])
+ problem = (SchemaProblem(path, schema_error.message),)
+ if isinstance(
+ schema_error, SchemaDeprecationError
+ ): # pylint: disable=W1116
deprecations += problem
else:
errors += problem
@@ -610,23 +648,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
@raises SchemaValidationError containing any of schema_errors encountered.
@raises RuntimeError when config_path does not exist.
"""
- if config_path is None:
- # Use system's raw userdata path
- if os.getuid() != 0:
- raise RuntimeError(
- "Unable to read system userdata as non-root user."
- " Try using sudo"
- )
- init = Init(ds_deps=[])
- init.fetch(existing="trust")
- init.consume_data()
- content = load_file(init.paths.get_ipath("cloud_config"), decode=False)
- else:
- if not os.path.exists(config_path):
- raise RuntimeError(
- "Configfile {0} does not exist".format(config_path)
- )
- content = load_file(config_path, decode=False)
+ content = load_file(config_path, decode=False)
if not content.startswith(CLOUD_CONFIG_HEADER):
errors = [
SchemaProblem(
@@ -693,7 +715,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
schema_deprecations=e.schema_deprecations,
)
)
- else:
+ elif e.schema_deprecations:
message = _format_schema_problems(
e.schema_deprecations,
prefix="Cloud config schema deprecations: ",
@@ -857,32 +879,35 @@ def _get_property_description(prop_config: dict) -> str:
Order and deprecated property description after active descriptions.
Add a trailing stop "." to any description not ending with ":".
"""
- prop_descr = prop_config.get("description", "")
+
+ def assign_descriptions(
+ config: dict, descriptions: list, deprecated_descriptions: list
+ ):
+ if any(
+ map(
+ config.get,
+ ("deprecated_version", "changed_version", "new_version"),
+ )
+ ):
+ deprecated_descriptions.append(
+ _add_deprecated_changed_or_new_msg(config)
+ )
+ elif config.get("description"):
+ descriptions.append(_add_deprecated_changed_or_new_msg(config))
+
oneOf = prop_config.get("oneOf", {})
anyOf = prop_config.get("anyOf", {})
- descriptions = []
- deprecated_descriptions = []
- if prop_descr:
- prop_descr = prop_descr.rstrip(".")
- if not prop_config.get(DEPRECATED_KEY):
- descriptions.append(prop_descr)
- else:
- deprecated_descriptions.append(_add_deprecation_msg(prop_descr))
+ descriptions: list = []
+ deprecated_descriptions: list = []
+
+ assign_descriptions(prop_config, descriptions, deprecated_descriptions)
for sub_item in chain(oneOf, anyOf):
- if not sub_item.get("description"):
- continue
- if not sub_item.get(DEPRECATED_KEY):
- descriptions.append(sub_item["description"].rstrip("."))
- else:
- deprecated_descriptions.append(
- f"{DEPRECATED_PREFIX}{sub_item['description'].rstrip('.')}"
- )
+ assign_descriptions(sub_item, descriptions, deprecated_descriptions)
+
# order deprecated descrs last
description = ". ".join(chain(descriptions, deprecated_descriptions))
if description:
description = f" {description}"
- if description[-1] != ":":
- description += "."
return description
@@ -1063,7 +1088,8 @@ def load_doc(requested_modules: list) -> str:
"Invalid --docs value {}. Must be one of: {}".format(
list(invalid_docs),
", ".join(all_modules),
- )
+ ),
+ sys_exit=True,
)
for mod_name in all_modules:
if "all" in requested_modules or mod_name in requested_modules:
@@ -1164,28 +1190,88 @@ def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
exclusive_args = [args.config_file, args.docs, args.system]
if len([arg for arg in exclusive_args if arg]) != 1:
- error("Expected one of --config-file, --system or --docs arguments")
+ error(
+ "Expected one of --config-file, --system or --docs arguments",
+ sys_exit=True,
+ )
if args.annotate and args.docs:
- error("Invalid flag combination. Cannot use --annotate with --docs")
+ error(
+ "Invalid flag combination. Cannot use --annotate with --docs",
+ sys_exit=True,
+ )
full_schema = get_schema()
- if args.config_file or args.system:
- try:
- validate_cloudconfig_file(
- args.config_file, full_schema, args.annotate
+ if args.docs:
+ print(load_doc(args.docs))
+ return
+ if args.config_file:
+ config_files = (("user-data", args.config_file),)
+ else:
+ if os.getuid() != 0:
+ error(
+ "Unable to read system userdata or vendordata as non-root"
+ " user. Try using sudo.",
+ sys_exit=True,
)
+ init = Init(ds_deps=[])
+ init.fetch(existing="trust")
+ userdata_file = init.paths.get_ipath("cloud_config")
+ if not userdata_file:
+ error(
+ "Unable to obtain user data file. No instance data available",
+ sys_exit=True,
+ )
+ return # Helps typing
+ config_files = (("user-data", userdata_file),)
+ vendor_config_files = (
+ ("vendor-data", init.paths.get_ipath("vendor_cloud_config")),
+ ("vendor2-data", init.paths.get_ipath("vendor2_cloud_config")),
+ )
+ for cfg_type, vendor_file in vendor_config_files:
+ if vendor_file and os.path.exists(vendor_file):
+ config_files += ((cfg_type, vendor_file),)
+ if not os.path.exists(config_files[0][1]):
+ error(
+ f"Config file {config_files[0][1]} does not exist",
+ fmt="Error: {}",
+ sys_exit=True,
+ )
+
+ nested_output_prefix = ""
+ multi_config_output = bool(len(config_files) > 1)
+ if multi_config_output:
+ print(
+ "Found cloud-config data types: %s"
+ % ", ".join(cfg_type for cfg_type, _ in config_files)
+ )
+ nested_output_prefix = " "
+
+ error_types = []
+ for idx, (cfg_type, cfg_file) in enumerate(config_files, 1):
+ if multi_config_output:
+ print(f"\n{idx}. {cfg_type} at {cfg_file}:")
+ try:
+ validate_cloudconfig_file(cfg_file, full_schema, args.annotate)
except SchemaValidationError as e:
if not args.annotate:
- error(str(e))
+ print(f"{nested_output_prefix}Invalid cloud-config {cfg_file}")
+ error(
+ str(e),
+ fmt=nested_output_prefix + "Error: {}\n",
+ )
+ error_types.append(cfg_type)
except RuntimeError as e:
- error(str(e))
+ print(f"{nested_output_prefix}Invalid cloud-config {cfg_type}")
+ error(str(e), fmt=nested_output_prefix + "Error: {}\n")
+ error_types.append(cfg_type)
else:
- if args.config_file is None:
- cfg_name = "system userdata"
- else:
- cfg_name = args.config_file
- print("Valid cloud-config:", cfg_name)
- elif args.docs:
- print(load_doc(args.docs))
+ cfg = cfg_file if args.config_file else cfg_type
+ print(f"{nested_output_prefix}Valid cloud-config: {cfg}")
+ if error_types:
+ error(
+ ", ".join(error_type for error_type in error_types),
+ fmt="Error: Invalid cloud-config schema: {}\n",
+ sys_exit=True,
+ )
def main():
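
Deprecation, change and new-feature notices are now assembled from <key>, <key>_version and <key>_description triplets: annotated output gets the message inline, while rendered docs get it appended as italicised RST. A sketch of the composition for the "deprecated" case, matching the shape of the lock-passwd schema entry further down (deprecation_note() is an illustrative name):

    def deprecation_note(prop, annotate=False):
        # Build the "Deprecated in version X. <reason>" fragment for a
        # schema property carrying deprecated/deprecated_version/
        # deprecated_description keys.
        if not prop.get("deprecated"):
            return ""
        version = prop.get(
            "deprecated_version",
            "<missing deprecated_version key, please file a bug report>",
        )
        reason = prop.get("deprecated_description", "")
        msg = f"Deprecated in version {version}. {reason}".strip()
        # Annotations stay inline; doc output appends italicised RST.
        return f" {msg}" if annotate else f"\n\n*{msg}*"


    # deprecation_note({"deprecated": True, "deprecated_version": "22.3",
    #                   "deprecated_description": "Use ``lock_passwd`` instead."})
    # -> "\n\n*Deprecated in version 22.3. Use ``lock_passwd`` instead.*"
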
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
index a91dc482..10636e6d 100644
--- a/cloudinit/config/schemas/schema-cloud-config-v1.json
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -174,6 +174,7 @@
"label": "<group_name>",
"description": "When providing an object for users.groups the ``<group_name>`` keys are the groups to add this user to",
"deprecated": true,
+ "deprecated_version": "23.1",
"type": [
"null"
],
@@ -197,9 +198,11 @@
},
"lock-passwd": {
"default": true,
- "description": "Dropped after April 2027. Use ``lock_passwd``. Default: ``true``",
"type": "boolean",
- "deprecated": true
+ "description": "Default: ``true``",
+ "deprecated": true,
+ "deprecated_version": "22.3",
+ "deprecated_description": "Use ``lock_passwd`` instead."
},
"lock_passwd": {
"default": true,
@@ -292,8 +295,9 @@
},
{
"type": "boolean",
- "deprecated": true,
- "description": "The value ``false`` will be dropped after April 2027. Use ``null`` or no ``sudo`` key instead."
+ "changed": true,
+ "changed_version": "22.2",
+ "changed_description": "The value ``false`` is deprecated for this key, use ``null`` instead."
}
]
},
@@ -305,8 +309,9 @@
},
{
"type": "string",
- "description": "The use of ``string`` type will be dropped after April 2027. Use an ``integer`` instead.",
- "deprecated": true
+ "changed": true,
+ "changed_description": "The use of ``string`` type is deprecated. Use an ``integer`` instead.",
+ "changed_version": "22.3"
}
]
}
@@ -361,10 +366,11 @@
"additionalProperties": false,
"properties": {
"remove-defaults": {
- "description": "Dropped after April 2027. Use ``remove_defaults``.",
"type": "boolean",
"default": false,
- "deprecated": true
+ "deprecated": true,
+ "deprecated_version": "22.3",
+ "deprecated_description": "Use ``remove_defaults`` instead."
},
"remove_defaults": {
"description": "Remove default CA certificates if true. Default: false",
@@ -920,7 +926,8 @@
},
{
"deprecated": true,
- "description": "Dropped after April 2027. Use ``ca_certs``."
+ "deprecated_version": "22.3",
+ "deprecated_description": "Use ``ca_certs`` instead."
}
]
}
@@ -1144,7 +1151,14 @@
{
"type": "array",
"items": {
- "type": "integer"
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "string"
+ }
+ ]
},
"minItems": 2,
"maxItems": 2
@@ -1180,7 +1194,7 @@
},
"device": {
"type": "string",
- "description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<device name>.<partition number>`` format, the value of ``partition`` will be overwritten."
+ "description": "Specified either as a path or as an alias in the format ``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device. If specifying device using the ``<alias name>.<partition number>`` format, the value of ``partition`` will be overwritten."
},
"partition": {
"type": [
@@ -1197,7 +1211,7 @@
]
}
],
- "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
+      "description": "The partition can be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in which case this module will search for the existence of a filesystem matching the ``label``, ``filesystem`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any filesystem that matches ``filesystem`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a filesystem directly to a device, use ``partition: none``. ``partition: none`` will **always** write the filesystem, even when the ``label`` and ``filesystem`` are matched, and ``overwrite`` is ``false``."
},
"overwrite": {
"type": "boolean",
@@ -1287,8 +1301,9 @@
"enum": [
false
],
- "description": "Specifying a boolean ``false`` value for this key is deprecated. Use ``off`` instead.",
- "deprecated": true
+ "changed": true,
+ "changed_version": "22.3",
+ "changed_description": "Specifying a boolean ``false`` value for ``mode`` is deprecated. Use ``off`` instead."
}
]
},
@@ -1335,8 +1350,9 @@
},
{
"type": "string",
- "description": "Use a boolean value instead.",
- "deprecated": true
+ "changed": true,
+ "changed_version": "22.3",
+ "changed_description": "Use a boolean value instead."
}
]
}
@@ -1344,8 +1360,9 @@
},
"grub-dpkg": {
"type": "object",
- "description": "Use ``grub_dpkg`` instead",
- "deprecated": true
+ "deprecated": true,
+ "deprecated_version": "22.2",
+ "deprecated_description": "Use ``grub_dpkg`` instead."
}
}
},
@@ -1764,7 +1781,7 @@
"description": "Path to the swap file to create"
},
"size": {
- "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format <float_size><units> where units are one of B, K, M, G or T.",
+          "description": "The size in bytes of the swap file, 'auto' or a human-readable size abbreviation of the format <float_size><units> where units are one of B, K, M, G or T. **WARNING: Attempts to use IEC prefixes in your configuration prior to cloud-init version 23.1 will result in unexpected behavior. SI prefix names (KB, MB) are required on pre-23.1 cloud-init, however IEC values are used. In summary, assume 1KB == 1024B, not 1000B**",
"oneOf": [
{
"enum": [
@@ -1911,20 +1928,26 @@
"apt_update": {
"type": "boolean",
"default": false,
- "description": "Dropped after April 2027. Use ``package_update``. Default: ``false``",
- "deprecated": true
+ "description": "Default: ``false``.",
+ "deprecated": true,
+ "deprecated_version": "22.2",
+ "deprecated_description": "Use ``package_update`` instead."
},
"apt_upgrade": {
"type": "boolean",
"default": false,
- "description": "Dropped after April 2027. Use ``package_upgrade``. Default: ``false``",
- "deprecated": true
+ "description": "Default: ``false``.",
+ "deprecated": true,
+ "deprecated_version": "22.2",
+ "deprecated_description": "Use ``package_upgrade`` instead."
},
"apt_reboot_if_required": {
"type": "boolean",
"default": false,
- "description": "Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``",
- "deprecated": true
+ "description": "Default: ``false``.",
+ "deprecated": true,
+ "deprecated_version": "22.2",
+ "deprecated_description": "Use ``package_reboot_if_required`` instead."
}
}
},
@@ -1998,8 +2021,9 @@
{
"type": "string",
"pattern": "^\\+?[0-9]+$",
- "deprecated": true,
- "description": "Use of string for this value will be dropped after April 2027. Use ``now`` or integer type."
+ "changed": true,
+ "changed_version": "22.3",
+ "changed_description": "Use of type string for this value is deprecated. Use ``now`` or integer type."
},
{
"enum": [
@@ -2417,8 +2441,9 @@
},
{
"type": "string",
- "description": "Use of string for this value is DEPRECATED. Use a boolean value instead.",
- "deprecated": true
+ "deprecated": true,
+ "deprecated_version": "22.3",
+ "deprecated_description": "Use of type string for this value is deprecated. Use a boolean instead."
}
]
},
@@ -2515,11 +2540,12 @@
},
{
"type": "string",
- "description": "Use of non-boolean values for this field is DEPRECATED and will result in an error in a future version of cloud-init.",
- "deprecated": true
+ "changed": true,
+ "changed_version": "22.3",
+ "changed_description": "Use of non-boolean values for this field is deprecated."
}
],
- "description": "Sets whether or not to accept password authentication. ``true`` will enable password auth. ``false`` will disable. Default is to leave the value unchanged."
+ "description": "Sets whether or not to accept password authentication. ``true`` will enable password auth. ``false`` will disable. Default is to leave the value unchanged. In order for this config to be applied, SSH may need to be restarted. On systemd systems, this restart will only happen if the SSH service has already been started. On non-systemd systems, a restart will be attempted regardless of the service state."
},
"chpasswd": {
"type": "object",
@@ -2531,7 +2557,7 @@
"description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``"
},
"users": {
- "description": "Replaces the deprecated ``list`` key. This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: ``name`` and ``password`` or in the case of a randomly generated password, ``name`` and ``type``. The ``type`` key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``.",
+ "description": "This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: ``name`` and ``password`` or in the case of a randomly generated password, ``name`` and ``type``. The ``type`` key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``.",
"type": "array",
"items": {
"minItems": 1,
@@ -2595,8 +2621,10 @@
}
],
"minItems": 1,
- "description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.\n\nUse of a multiline string for this field is DEPRECATED and will result in an error in a future version of cloud-init.",
- "deprecated": true
+ "description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.",
+ "deprecated": true,
+ "deprecated_version": "22.2",
+ "deprecated_description": "Use ``users`` instead."
}
}
},
@@ -2959,8 +2987,9 @@
"enum": [
"template"
],
- "description": "Value ``template`` will be dropped after April 2027. Use ``true`` instead.",
- "deprecated": true
+ "changed_description": "Use of ``template`` is deprecated, use ``true`` instead.",
+ "changed": true,
+ "changed_version": "22.3"
}
]
},
@@ -3122,7 +3151,7 @@
"owner": {
"type": "string",
"default": "root:root",
- "description": "Optional owner:group to chown on the file. Default: ``root:root``"
+ "description": "Optional owner:group to chown on the file and new directories. Default: ``root:root``"
},
"permissions": {
"type": "string",
diff --git a/cloudinit/distros/OpenCloudOS.py b/cloudinit/distros/OpenCloudOS.py
new file mode 100644
index 00000000..53c7a333
--- /dev/null
+++ b/cloudinit/distros/OpenCloudOS.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ def __init__(self, name, cfg, paths):
+ super(Distro, self).__init__(name, cfg, paths)
+ self.osfamily = "OpenCloudOS"
diff --git a/cloudinit/distros/TencentOS.py b/cloudinit/distros/TencentOS.py
new file mode 100644
index 00000000..53c7a333
--- /dev/null
+++ b/cloudinit/distros/TencentOS.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ def __init__(self, name, cfg, paths):
+ super(Distro, self).__init__(name, cfg, paths)
+ self.osfamily = "OpenCloudOS"
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 735a7832..940b689e 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -57,14 +57,23 @@ OSFAMILIES = {
"fedora",
"mariner",
"miraclelinux",
- "openEuler",
"openmandriva",
"photon",
"rhel",
"rocky",
"virtuozzo",
],
- "suse": ["opensuse", "sles"],
+ "suse": [
+ "opensuse",
+ "opensuse-leap",
+ "opensuse-microos",
+ "opensuse-tumbleweed",
+ "sle_hpc",
+ "sle-micro",
+ "sles",
+ ],
+ "openEuler": ["openEuler"],
+ "OpenCloudOS": ["OpenCloudOS", "TencentOS"],
}
LOG = logging.getLogger(__name__)
@@ -983,6 +992,37 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
**kwargs,
)
+ @property
+ def is_virtual(self) -> Optional[bool]:
+ """Detect if running on a virtual machine or bare metal.
+
+ If the detection fails, it returns None.
+ """
+ if not uses_systemd():
+ # For non systemd systems the method should be
+ # implemented in the distro class.
+ LOG.warning("is_virtual should be implemented on distro class")
+ return None
+
+ try:
+ detect_virt_path = subp.which("systemd-detect-virt")
+ if detect_virt_path:
+ out, _ = subp.subp(
+ [detect_virt_path], capture=True, rcs=[0, 1]
+ )
+
+ return not out.strip() == "none"
+ else:
+ err_msg = "detection binary not found"
+ except subp.ProcessExecutionError as e:
+ err_msg = str(e)
+
+ LOG.warning(
+ "Failed to detect virtualization with systemd-detect-virt: %s",
+ err_msg,
+ )
+ return None
+
def _apply_hostname_transformations_to_url(url: str, transformations: list):
"""
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index b9fd37b8..706d0743 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -6,7 +6,9 @@
import os
import re
+from functools import lru_cache
from io import StringIO
+from typing import Optional
import cloudinit.distros.bsd
from cloudinit import log as logging
@@ -76,6 +78,7 @@ class Distro(cloudinit.distros.bsd.BSD):
"groups": "-G",
"shell": "-s",
"inactive": "-E",
+ "uid": "-u",
}
pw_useradd_flags = {
"no_user_group": "--no-user-group",
@@ -84,8 +87,8 @@ class Distro(cloudinit.distros.bsd.BSD):
}
for key, val in kwargs.items():
- if key in pw_useradd_opts and val and isinstance(val, str):
- pw_useradd_cmd.extend([pw_useradd_opts[key], val])
+ if key in pw_useradd_opts and val and isinstance(val, (str, int)):
+ pw_useradd_cmd.extend([pw_useradd_opts[key], str(val)])
elif key in pw_useradd_flags and val:
pw_useradd_cmd.append(pw_useradd_flags[key])
@@ -191,5 +194,40 @@ class Distro(cloudinit.distros.bsd.BSD):
freq=PER_INSTANCE,
)
+ @lru_cache()
+ def is_container(self) -> bool:
+ """return whether we're running in a container.
+ Cached, because it's unlikely to change."""
+ jailed, _ = subp.subp(["sysctl", "-n", "security.jail.jailed"])
+ if jailed.strip() == "0":
+ return False
+ return True
+
+ @lru_cache()
+ def virtual(self) -> str:
+ """return the kind of virtualisation system we're running under.
+ Cached, because it's unlikely to change."""
+ if self.is_container():
+ return "jail"
+ # map FreeBSD's kern.vm_guest to systemd-detect-virt, just like we do
+ # in ds-identify
+ VM_GUEST_TO_SYSTEMD = {
+ "hv": "microsoft",
+ "vbox": "oracle",
+ "generic": "vm-other",
+ }
+ vm, _ = subp.subp(["sysctl", "-n", "kern.vm_guest"])
+ vm = vm.strip()
+ if vm in VM_GUEST_TO_SYSTEMD:
+ return VM_GUEST_TO_SYSTEMD[vm]
+ return vm
+
+ @property
+ def is_virtual(self) -> Optional[bool]:
+ """Detect if running on a virtual machine or bare metal.
-# vi: ts=4 expandtab
+ This can fail on some platforms, so the signature is Optional[bool]
+ """
+ if self.virtual() == "none":
+ return False
+ return True
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
index 7edfe965..28ee1b43 100644
--- a/cloudinit/distros/networking.py
+++ b/cloudinit/distros/networking.py
@@ -190,6 +190,7 @@ class BSDNetworking(Networking):
self.ifc = ifconfig.Ifconfig()
self.ifs = {}
self._update_ifs()
+ super().__init__()
def _update_ifs(self):
ifconf = subp.subp(["ifconfig", "-a"])
diff --git a/cloudinit/distros/openEuler.py b/cloudinit/distros/openEuler.py
index 3dc0a342..92f1985d 100644
--- a/cloudinit/distros/openEuler.py
+++ b/cloudinit/distros/openEuler.py
@@ -4,7 +4,9 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
- pass
+ def __init__(self, name, cfg, paths):
+ super(Distro, self).__init__(name, cfg, paths)
+ self.osfamily = "openEuler"
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/opensuse-leap.py b/cloudinit/distros/opensuse-leap.py
new file mode 100644
index 00000000..097e9c14
--- /dev/null
+++ b/cloudinit/distros/opensuse-leap.py
@@ -0,0 +1,14 @@
+# Copyright (C) 2023 SUSE LLC
+#
+# Author: Robert Schweikert <rjschwei@suse.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import opensuse
+
+
+class Distro(opensuse.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/opensuse-microos.py b/cloudinit/distros/opensuse-microos.py
new file mode 100644
index 00000000..097e9c14
--- /dev/null
+++ b/cloudinit/distros/opensuse-microos.py
@@ -0,0 +1,14 @@
+# Copyright (C) 2023 SUSE LLC
+#
+# Author: Robert Schweikert <rjschwei@suse.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import opensuse
+
+
+class Distro(opensuse.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/opensuse-tumbleweed.py b/cloudinit/distros/opensuse-tumbleweed.py
new file mode 100644
index 00000000..097e9c14
--- /dev/null
+++ b/cloudinit/distros/opensuse-tumbleweed.py
@@ -0,0 +1,14 @@
+# Copyright (C) 2023 SUSE LLC
+#
+# Author: Robert Schweikert <rjschwei@suse.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import opensuse
+
+
+class Distro(opensuse.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 00ed1514..38307c91 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -8,11 +8,17 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros, helpers, subp, util
+import os
+
+from cloudinit import distros, helpers
+from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.distros import rhel_util as rhutil
from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit.settings import PER_INSTANCE
+LOG = logging.getLogger(__name__)
+
class Distro(distros.Distro):
clock_conf_fn = "/etc/sysconfig/clock"
@@ -44,6 +50,8 @@ class Distro(distros.Distro):
distros.Distro.__init__(self, name, cfg, paths)
self._runner = helpers.Runners(paths)
self.osfamily = "suse"
+ self.update_method = None
+ self.read_only_root = False
cfg["ssh_svcname"] = "sshd"
if self.uses_systemd():
self.init_cmd = ["systemctl"]
@@ -69,12 +77,44 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
+ self._set_update_method()
+ if self.read_only_root and not self.update_method == "transactional":
+ LOG.error(
+ "Package operation requested but read only root "
+                "without btrfs and transactional-update"
+ )
+ return
+
# No user interaction possible, enable non-interactive mode
- cmd = ["zypper", "--non-interactive"]
+ if self.update_method == "zypper":
+ cmd = ["zypper", "--non-interactive"]
+ else:
+ cmd = [
+ "transactional-update",
+ "--non-interactive",
+ "--drop-if-no-change",
+ "pkg",
+ ]
# Command is the operation, such as install
if command == "upgrade":
command = "update"
+ if (
+ not pkgs
+ and self.update_method == "transactional"
+ and command == "update"
+ ):
+ command = "up"
+ cmd = [
+ "transactional-update",
+ "--non-interactive",
+ "--drop-if-no-change",
+ ]
+ # Repo refresh only modifies data in the read-write path,
+ # always uses zypper
+ if command == "refresh":
+ # Repo refresh is a zypper only option, ignore the t-u setting
+ cmd = ["zypper", "--non-interactive"]
cmd.append(command)
# args are the arguments to the command, not global options
@@ -89,6 +129,11 @@ class Distro(distros.Distro):
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
+ if self.update_method == "transactional":
+ LOG.info(
+                "To use/activate the installed packages, reboot the system"
+ )
+
def set_timezone(self, tz):
tz_file = self._find_tz_file(tz)
if self.uses_systemd():
@@ -147,6 +192,34 @@ class Distro(distros.Distro):
host_fn = self.hostname_conf_fn
return (host_fn, self._read_hostname(host_fn))
+ def _set_update_method(self):
+ """Decide if we want to use transactional-update or zypper"""
+ if self.update_method is None:
+ result = util.get_mount_info("/")
+ fs_type = ""
+ if result:
+ (devpth, fs_type, mount_point) = result
+ # Check if the file system is read only
+ mounts = util.load_file("/proc/mounts").split("\n")
+ for mount in mounts:
+ if mount.startswith(devpth):
+ mount_info = mount.split()
+ if mount_info[1] != mount_point:
+ continue
+ self.read_only_root = mount_info[3].startswith("ro")
+ break
+ if fs_type.lower() == "btrfs" and os.path.exists(
+ "/usr/sbin/transactional-update"
+ ):
+ self.update_method = "transactional"
+ else:
+ self.update_method = "zypper"
+ else:
+ LOG.info(
+                "Could not determine filesystem type of '/', using zypper"
+ )
+ self.update_method = "zypper"
+
def _write_hostname(self, hostname, filename):
if self.uses_systemd() and filename.endswith("/previous-hostname"):
util.write_file(filename, hostname)
diff --git a/cloudinit/distros/parsers/ifconfig.py b/cloudinit/distros/parsers/ifconfig.py
index 35f728e0..3e57e41a 100644
--- a/cloudinit/distros/parsers/ifconfig.py
+++ b/cloudinit/distros/parsers/ifconfig.py
@@ -6,9 +6,10 @@
import copy
import re
+from collections import defaultdict
from functools import lru_cache
from ipaddress import IPv4Address, IPv4Interface, IPv6Interface
-from typing import Dict, Optional, Tuple
+from typing import Dict, List, Optional, Tuple, Union
from cloudinit import log as logging
@@ -87,10 +88,11 @@ class Ifconfig:
"""
def __init__(self):
- self._ifs = {}
+ self._ifs_by_name = {}
+ self._ifs_by_mac = {}
@lru_cache()
- def parse(self, text: str) -> Dict[str, Ifstate]:
+ def parse(self, text: str) -> Dict[str, Union[Ifstate, List[Ifstate]]]:
"""
Parse the ``ifconfig -a`` output ``text``, into a dict of ``Ifstate``
objects, referenced by ``name`` *and* by ``mac`` address.
@@ -104,6 +106,7 @@ class Ifconfig:
@returns: A dict of ``Ifstate``s, referenced by ``name`` and ``mac``
"""
ifindex = 0
+ ifs_by_mac = defaultdict(list)
for line in text.splitlines():
if len(line) == 0:
continue
@@ -119,7 +122,7 @@ class Ifconfig:
curif = curif[:-1]
dev = Ifstate(curif)
dev.index = ifindex
- self._ifs[curif] = dev
+ self._ifs_by_name[curif] = dev
toks = line.lower().strip().split()
@@ -157,10 +160,10 @@ class Ifconfig:
if toks[0] == "ether":
dev.mac = toks[1]
dev.macs.append(toks[1])
- self._ifs[toks[1]] = dev
+ ifs_by_mac[toks[1]].append(dev)
if toks[0] == "hwaddr":
dev.macs.append(toks[1])
- self._ifs[toks[1]] = dev
+ ifs_by_mac[toks[1]].append(dev)
if toks[0] == "groups:":
dev.groups += toks[1:]
@@ -195,17 +198,14 @@ class Ifconfig:
if toks[i] == "interface:":
dev.vlan["link"] = toks[i + 1]
- return self._ifs
+ self._ifs_by_mac = dict(ifs_by_mac)
+ return {**self._ifs_by_name, **self._ifs_by_mac}
def ifs_by_name(self):
- return {
- k: v for (k, v) in self._ifs.items() if not re.fullmatch(MAC_RE, k)
- }
+ return self._ifs_by_name
def ifs_by_mac(self):
- return {
- k: v for (k, v) in self._ifs.items() if re.fullmatch(MAC_RE, k)
- }
+ return self._ifs_by_mac
def _parse_inet(self, toks: list) -> Tuple[str, dict]:
broadcast = None
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 320f4ba1..df7dc3d6 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -132,6 +132,7 @@ class Distro(distros.Distro):
return util.load_file(filename).strip()
elif self.uses_systemd():
(out, _err) = subp.subp(["hostname"])
+ out = out.strip()
if len(out):
return out
else:
diff --git a/cloudinit/distros/sle-micro.py b/cloudinit/distros/sle-micro.py
new file mode 100644
index 00000000..097e9c14
--- /dev/null
+++ b/cloudinit/distros/sle-micro.py
@@ -0,0 +1,14 @@
+# Copyright (C) 2023 SUSE LLC
+#
+# Author: Robert Schweikert <rjschwei@suse.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import opensuse
+
+
+class Distro(opensuse.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/sle_hpc.py b/cloudinit/distros/sle_hpc.py
new file mode 100644
index 00000000..097e9c14
--- /dev/null
+++ b/cloudinit/distros/sle_hpc.py
@@ -0,0 +1,14 @@
+# Copyright (C) 2023 SUSE LLC
+#
+# Author: Robert Schweikert <rjschwei@suse.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import opensuse
+
+
+class Distro(opensuse.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/dmi.py b/cloudinit/dmi.py
index dff9ab0f..edc64e5c 100644
--- a/cloudinit/dmi.py
+++ b/cloudinit/dmi.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+import re
from collections import namedtuple
from typing import Optional
@@ -182,4 +183,31 @@ def read_dmi_data(key: str) -> Optional[str]:
return None
+def sub_dmi_vars(src: str) -> str:
+ """Replace __dmi.VARNAME__ with DMI values from either sysfs or kenv."""
+ if "__" not in src:
+ return src
+ valid_dmi_keys = DMIDECODE_TO_KERNEL.keys()
+ for match in re.findall(r"__dmi\.([^_]+)__", src):
+ if match not in valid_dmi_keys:
+ LOG.warning(
+ "Ignoring invalid __dmi.%s__ in %s. Expected one of: %s.",
+ match,
+ src,
+ valid_dmi_keys,
+ )
+ continue
+ dmi_value = read_dmi_data(match)
+ if not dmi_value:
+ dmi_value = ""
+ LOG.debug(
+ "Replacing __dmi.%s__ in '%s' with '%s'.",
+ match,
+ src,
+ dmi_value,
+ )
+ src = src.replace(f"__dmi.{match}__", dmi_value)
+ return src
+
+
# vi: ts=4 expandtab
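Note: a minimal usage sketch for the new sub_dmi_vars() helper added above
(illustrative only; the URL and the choice of the "system-uuid" key are
assumptions, not part of this patch):

    from cloudinit import dmi

    # Tokens of the form __dmi.<key>__ are replaced with the corresponding
    # DMI value; unknown keys are logged with a warning and left in place.
    url = dmi.sub_dmi_vars("http://10.0.0.1/__dmi.system-uuid__/meta-data")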
diff --git a/cloudinit/features.py b/cloudinit/features.py
index ac586f6b..471a4331 100644
--- a/cloudinit/features.py
+++ b/cloudinit/features.py
@@ -59,6 +59,27 @@ only non-hashed passwords were expired.
(This flag can be removed after Jammy is no longer supported.)
"""
+NETPLAN_CONFIG_ROOT_READ_ONLY = True
+"""
+If ``NETPLAN_CONFIG_ROOT_READ_ONLY`` is True, then netplan configuration will
+be written as a single root read-only file /etc/netplan/50-cloud-init.yaml.
+This prevents wifi passwords in network v2 configuration from being
+world-readable. Prior to 23.1, netplan configuration is world-readable.
+
+(This flag can be removed after Jammy is no longer supported.)
+"""
+
+
+NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH = True
+"""
+Append a forward slash '/' if NoCloud seedurl does not end with either
+a querystring or forward slash. Prior to 23.1, nocloud seedurl would be used
+unaltered, appending meta-data, user-data and vendor-data to it without URL
+path separators.
+
+(This flag can be removed when Jammy is no longer supported.)
+"""
+
try:
# pylint: disable=wildcard-import
from cloudinit.feature_overrides import * # noqa
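Note: a hedged sketch of how a downstream build could pin the pre-23.1
behaviour through the feature_overrides wildcard import shown in the context
above (the override values below are illustrative, not part of this patch):

    # cloudinit/feature_overrides.py
    NETPLAN_CONFIG_ROOT_READ_ONLY = False  # keep world-readable netplan config
    NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH = False  # keep seedurl unaltered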
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 4bc48676..50e445ec 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -1000,15 +1000,61 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
Bridges and any devices that have a 'stolen' mac are excluded."""
ret: dict = {}
- for name, mac, _driver, _devid in get_interfaces(
+ driver_map: dict = {}
+ for name, mac, driver, _devid in get_interfaces(
blacklist_drivers=blacklist_drivers
):
if mac in ret:
- raise RuntimeError(
- "duplicate mac found! both '%s' and '%s' have mac '%s'"
- % (name, ret[mac], mac)
+ raise_duplicate_mac_error = True
+ msg = "duplicate mac found! both '%s' and '%s' have mac '%s'." % (
+ name,
+ ret[mac],
+ mac,
)
+ # Hyper-V netvsc driver will register a VF with the same mac
+ #
+ # The VF will be enslaved to the master nic shortly after
+ # registration. If cloud-init starts enumerating the interfaces
+ # before the completion of the enslaving process, it will see
+ # two different nics with duplicate mac. Cloud-init should ignore
+ # the slave nic (which does not have hv_netvsc driver).
+ if driver != driver_map[mac]:
+ if driver_map[mac] == "hv_netvsc":
+ LOG.warning(
+ msg + " Ignoring '%s' due to driver '%s' and "
+ "'%s' having driver hv_netvsc."
+ % (name, driver, ret[mac])
+ )
+ continue
+ if driver == "hv_netvsc":
+ raise_duplicate_mac_error = False
+ LOG.warning(
+ msg + " Ignoring '%s' due to driver '%s' and "
+ "'%s' having driver hv_netvsc."
+ % (ret[mac], driver_map[mac], name)
+ )
+
+ # This is intended to be a short-term fix of LP: #1997922
+ # Long term, we should better handle configuration of virtual
+ # devices where duplicate MACs are expected early in boot if
+ # cloud-init happens to enumerate network interfaces before drivers
+ # have fully initialized the leader/subordinate relationships for
+ # those devices or switches.
+ if driver == "mscc_felix" or driver == "fsl_enetc":
+ LOG.debug(
+ "Ignoring duplicate macs from '%s' and '%s' due to "
+ "driver '%s'.",
+ name,
+ ret[mac],
+ driver,
+ )
+ continue
+
+ if raise_duplicate_mac_error:
+ raise RuntimeError(msg)
+
ret[mac] = name
+ driver_map[mac] = driver
# Pretend that an Infiniband GUID is an ethernet address for Openstack
# configuration purposes
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index b6af3770..7d11a02c 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -97,7 +97,7 @@ class IfUpDownActivator(NetworkActivator):
# E.g., NetworkManager has a ifupdown plugin that requires the name
# of a specific connection.
@staticmethod
- def available(target: str = None) -> bool:
+ def available(target: Optional[str] = None) -> bool:
"""Return true if ifupdown can be used on this system."""
return eni_available(target=target)
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index e8778d27..b23279e5 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -30,6 +30,7 @@ class BSDRenderer(renderer.Renderer):
config = {}
self.target = None
self.interface_configurations = {}
+ self.interface_configurations_ipv6 = {}
self._postcmds = config.get("postcmds", True)
def _ifconfig_entries(self, settings):
@@ -62,8 +63,6 @@ class BSDRenderer(renderer.Renderer):
LOG.info("Configuring interface %s", device_name)
- self.interface_configurations[device_name] = "DHCP"
-
for subnet in interface.get("subnets", []):
if subnet.get("type") == "static":
if not subnet.get("netmask"):
@@ -85,29 +84,70 @@ class BSDRenderer(renderer.Renderer):
"mtu": subnet.get("mtu") or interface.get("mtu"),
}
+ elif subnet.get("type") == "static6":
+ if not subnet.get("prefix"):
+ LOG.debug(
+ "Skipping IP %s, because there is no prefix",
+ subnet.get("address"),
+ )
+ continue
+ LOG.debug(
+ "Configuring dev %s with %s / %s",
+ device_name,
+ subnet.get("address"),
+ subnet.get("prefix"),
+ )
+
+ self.interface_configurations_ipv6[device_name] = {
+ "address": subnet.get("address"),
+ "prefix": subnet.get("prefix"),
+ "mtu": subnet.get("mtu") or interface.get("mtu"),
+ }
+ elif (
+ subnet.get("type") == "dhcp"
+ or subnet.get("type") == "dhcp4"
+ ):
+ self.interface_configurations[device_name] = "DHCP"
+
def _route_entries(self, settings):
routes = list(settings.iter_routes())
for interface in settings.iter_interfaces():
subnets = interface.get("subnets", [])
for subnet in subnets:
- if subnet.get("type") != "static":
+ if subnet.get("type") == "static":
+ gateway = subnet.get("gateway")
+ if gateway and len(gateway.split(".")) == 4:
+ routes.append(
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": gateway,
+ }
+ )
+ elif subnet.get("type") == "static6":
+ gateway = subnet.get("gateway")
+ if gateway and len(gateway.split(":")) > 1:
+ routes.append(
+ {
+ "network": "::",
+ "prefix": "0",
+ "gateway": gateway,
+ }
+ )
+ else:
continue
- gateway = subnet.get("gateway")
- if gateway and len(gateway.split(".")) == 4:
- routes.append(
- {
- "network": "0.0.0.0",
- "netmask": "0.0.0.0",
- "gateway": gateway,
- }
- )
routes += subnet.get("routes", [])
+
for route in routes:
network = route.get("network")
if not network:
LOG.debug("Skipping a bad route entry")
continue
- netmask = route.get("netmask")
+ netmask = (
+ route.get("netmask")
+ if route.get("netmask")
+ else route.get("prefix")
+ )
gateway = route.get("gateway")
self.set_route(network, netmask, gateway)
diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py
index 5ce41694..1dfde6e0 100644
--- a/cloudinit/net/ephemeral.py
+++ b/cloudinit/net/ephemeral.py
@@ -4,7 +4,7 @@
"""
import contextlib
import logging
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
import cloudinit.net as net
from cloudinit import subp
@@ -35,7 +35,7 @@ class EphemeralIPv4Network:
prefix_or_mask,
broadcast,
router=None,
- connectivity_url_data: Dict[str, Any] = None,
+ connectivity_url_data: Optional[Dict[str, Any]] = None,
static_routes=None,
):
"""Setup context manager and validate call signature.
@@ -313,7 +313,7 @@ class EphemeralDHCPv4:
def __init__(
self,
iface=None,
- connectivity_url_data: Dict[str, Any] = None,
+ connectivity_url_data: Optional[Dict[str, Any]] = None,
dhcp_log_func=None,
tmp_dir=None,
):
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index ec42b60c..415f4a5a 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -17,14 +17,31 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
def write_config(self):
for device_name, v in self.interface_configurations.items():
- net_config = "DHCP"
if isinstance(v, dict):
- net_config = v.get("address") + " netmask " + v.get("netmask")
+ net_config = "inet %s netmask %s" % (
+ v.get("address"),
+ v.get("netmask"),
+ )
mtu = v.get("mtu")
if mtu:
net_config += " mtu %d" % mtu
+ elif v == "DHCP":
+ net_config = "DHCP"
self.set_rc_config_value("ifconfig_" + device_name, net_config)
+ for device_name, v in self.interface_configurations_ipv6.items():
+ if isinstance(v, dict):
+ net_config = "inet6 %s/%d" % (
+ v.get("address"),
+ v.get("prefix"),
+ )
+ mtu = v.get("mtu")
+ if mtu:
+ net_config += " mtu %d" % mtu
+ self.set_rc_config_value(
+ "ifconfig_%s_ipv6" % device_name, net_config
+ )
+
def start_services(self, run=False):
if not run:
LOG.debug("freebsd generate postcmd disabled")
@@ -58,6 +75,8 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
def set_route(self, network, netmask, gateway):
if network == "0.0.0.0":
self.set_rc_config_value("defaultrouter", gateway)
+ elif network == "::":
+ self.set_rc_config_value("ipv6_defaultrouter", gateway)
else:
route_name = "route_net%d" % self._route_cpt
route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 32fb031c..ad586e1e 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -1,10 +1,12 @@
# This file is part of cloud-init. See LICENSE file ...
import copy
+import ipaddress
import os
import textwrap
from typing import Optional, cast
+from cloudinit import features
from cloudinit import log as logging
from cloudinit import safeyaml, subp, util
from cloudinit.net import (
@@ -43,10 +45,10 @@ def _get_params_dict_by_match(config, match):
)
-def _extract_addresses(config, entry, ifname, features=None):
+def _extract_addresses(config: dict, entry: dict, ifname, features=None):
"""This method parse a cloudinit.net.network_state dictionary (config) and
maps netstate keys/values into a dictionary (entry) to represent
- netplan yaml.
+ netplan yaml. (config v1 -> netplan)
An example config dictionary might look like:
@@ -81,8 +83,10 @@ def _extract_addresses(config, entry, ifname, features=None):
"""
def _listify(obj, token=" "):
- "Helper to convert strings to list of strings, handle single string"
- if not obj or type(obj) not in [str]:
+ """
+ Helper to convert strings to list of strings, handle single string
+ """
+ if not obj or not isinstance(obj, str):
return obj
if token in obj:
return obj.split(token)
@@ -112,12 +116,34 @@ def _extract_addresses(config, entry, ifname, features=None):
addr = "%s" % subnet.get("address")
if "prefix" in subnet:
addr += "/%d" % subnet.get("prefix")
- if "gateway" in subnet and subnet.get("gateway"):
- gateway = subnet.get("gateway")
- if ":" in gateway:
- entry.update({"gateway6": gateway})
- else:
- entry.update({"gateway4": gateway})
+ if subnet.get("gateway"):
+ new_route = {
+ "via": subnet.get("gateway"),
+ "to": "default",
+ }
+ try:
+ subnet_gateway = ipaddress.ip_address(subnet["gateway"])
+ subnet_network = ipaddress.ip_network(addr, strict=False)
+ # If the gateway is not contained within the subnet's
+ # network, mark it as on-link so that it can still be
+ # reached.
+ if subnet_gateway not in subnet_network:
+ LOG.debug(
+ "Gateway %s is not contained within subnet %s,"
+ " adding on-link flag",
+ subnet["gateway"],
+ addr,
+ )
+ new_route["on-link"] = True
+ except ValueError as e:
+ LOG.warning(
+ "Failed to check whether gateway %s"
+ " is contained within subnet %s: %s",
+ subnet["gateway"],
+ addr,
+ e,
+ )
+ routes.append(new_route)
if "dns_nameservers" in subnet:
nameservers += _listify(subnet.get("dns_nameservers", []))
if "dns_search" in subnet:
@@ -258,7 +284,14 @@ class Renderer(renderer.Renderer):
if not header.endswith("\n"):
header += "\n"
- util.write_file(fpnplan, header + content)
+
+ mode = 0o600 if features.NETPLAN_CONFIG_ROOT_READ_ONLY else 0o644
+ if os.path.exists(fpnplan):
+ current_mode = util.get_permissions(fpnplan)
+ if current_mode & mode == current_mode:
+ # preserve mode if existing perms are more strict than default
+ mode = current_mode
+ util.write_file(fpnplan, header + content, mode=mode)
if self.clean_default:
_clean_default(target=target)
@@ -302,7 +335,7 @@ class Renderer(renderer.Renderer):
"successfully for all devices."
) from last_exception
- def _render_content(self, network_state: NetworkState):
+ def _render_content(self, network_state: NetworkState) -> str:
# if content already in netplan format, pass it back
if network_state.version == 2:
@@ -328,11 +361,7 @@ class Renderer(renderer.Renderer):
for config in network_state.iter_interfaces():
ifname = config.get("name")
# filter None (but not False) entries up front
- ifcfg = dict(
- (key, value)
- for (key, value) in config.items()
- if value is not None
- )
+ ifcfg = dict(filter(lambda it: it[1] is not None, config.items()))
if_type = ifcfg.get("type")
if if_type == "physical":
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index dd2ff489..f88b1321 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -83,6 +83,16 @@ NET_CONFIG_TO_V2: Dict[str, Dict[str, Any]] = {
}
+def warn_deprecated_all_devices(dikt: dict) -> None:
+ """Warn about deprecations of v2 properties for all devices"""
+ if "gateway4" in dikt or "gateway6" in dikt:
+ LOG.warning(
+ "DEPRECATED: The use of `gateway4` and `gateway6` is"
+ " deprecated. For more info check out: "
+ "https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v2.html" # noqa: E501
+ )
+
+
def from_state_file(state_file):
state = util.read_conf(state_file)
nsi = NetworkStateInterpreter()
@@ -241,7 +251,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
self,
version=NETWORK_STATE_VERSION,
config=None,
- renderer=None, # type: Optional[Renderer]
+ renderer: "Optional[Renderer]" = None,
):
self._version = version
self._config = config
@@ -750,6 +760,8 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
if key in cfg:
phy_cmd[key] = cfg[key]
+ warn_deprecated_all_devices(cfg)
+
subnets = self._v2_to_v1_ipcfg(cfg)
if len(subnets) > 0:
phy_cmd.update({"subnets": subnets})
@@ -784,6 +796,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
}
if "mtu" in cfg:
vlan_cmd["mtu"] = cfg["mtu"]
+ warn_deprecated_all_devices(cfg)
subnets = self._v2_to_v1_ipcfg(cfg)
if len(subnets) > 0:
vlan_cmd.update({"subnets": subnets})
@@ -852,6 +865,8 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
}
if "mtu" in item_cfg:
v1_cmd["mtu"] = item_cfg["mtu"]
+
+ warn_deprecated_all_devices(item_cfg)
subnets = self._v2_to_v1_ipcfg(item_cfg)
if len(subnets) > 0:
v1_cmd.update({"subnets": subnets})
@@ -974,7 +989,7 @@ def _normalize_net_keys(network, address_keys=()):
@returns: A dict containing normalized prefix and matching addr_key.
"""
- net = dict((k, v) for k, v in network.items() if v)
+ net = {k: v for k, v in network.items() if v or v == 0}
addr_key = None
for key in address_keys:
if net.get(key):
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
index 3591513f..4fd8a9b8 100644
--- a/cloudinit/net/networkd.py
+++ b/cloudinit/net/networkd.py
@@ -28,7 +28,7 @@ class CfgParser:
"DHCPv4": [],
"DHCPv6": [],
"Address": [],
- "Route": [],
+ "Route": {},
}
)
@@ -40,6 +40,22 @@ class CfgParser:
self.conf_dict[k] = list(dict.fromkeys(self.conf_dict[k]))
self.conf_dict[k].sort()
+ def update_route_section(self, sec, rid, key, val):
+ """
+        For each route section we use rid as a key; this allows us to isolate
+ this route from others on subsequent calls.
+ """
+ for k in self.conf_dict.keys():
+ if k == sec:
+ if rid not in self.conf_dict[k]:
+ self.conf_dict[k][rid] = []
+ self.conf_dict[k][rid].append(key + "=" + str(val))
+ # remove duplicates from list
+ self.conf_dict[k][rid] = list(
+ dict.fromkeys(self.conf_dict[k][rid])
+ )
+ self.conf_dict[k][rid].sort()
+
def get_final_conf(self):
contents = ""
for k, v in sorted(self.conf_dict.items()):
@@ -50,6 +66,12 @@ class CfgParser:
contents += "[" + k + "]\n"
contents += e + "\n"
contents += "\n"
+ elif k == "Route":
+ for n in sorted(v):
+ contents += "[" + k + "]\n"
+ for e in sorted(v[n]):
+ contents += e + "\n"
+ contents += "\n"
else:
contents += "[" + k + "]\n"
for e in sorted(v):
@@ -112,7 +134,11 @@ class Renderer(renderer.Renderer):
if "mtu" in iface and iface["mtu"]:
cfg.update_section(sec, "MTUBytes", iface["mtu"])
- def parse_routes(self, conf, cfg: CfgParser):
+ def parse_routes(self, rid, conf, cfg: CfgParser):
+ """
+ Parse a route and use rid as a key in order to isolate the route from
+ others in the route dict.
+ """
sec = "Route"
route_cfg_map = {
"gateway": "Gateway",
@@ -130,11 +156,12 @@ class Renderer(renderer.Renderer):
continue
if k == "network":
v += prefix
- cfg.update_section(sec, route_cfg_map[k], v)
+ cfg.update_route_section(sec, rid, route_cfg_map[k], v)
def parse_subnets(self, iface, cfg: CfgParser):
dhcp = "no"
sec = "Network"
+ rid = 0
for e in iface.get("subnets", []):
t = e["type"]
if t == "dhcp4" or t == "dhcp":
@@ -149,7 +176,10 @@ class Renderer(renderer.Renderer):
dhcp = "yes"
if "routes" in e and e["routes"]:
for i in e["routes"]:
- self.parse_routes(i, cfg)
+ # Use "r" as a dict key prefix for this route to isolate
+ # it from other sources of routes
+ self.parse_routes(f"r{rid}", i, cfg)
+ rid = rid + 1
if "address" in e:
subnet_cfg_map = {
"address": "Address",
@@ -163,7 +193,12 @@ class Renderer(renderer.Renderer):
v += "/" + str(e["prefix"])
cfg.update_section("Address", subnet_cfg_map[k], v)
elif k == "gateway":
- cfg.update_section("Route", subnet_cfg_map[k], v)
+ # Use "a" as a dict key prefix for this route to
+ # isolate it from other sources of routes
+ cfg.update_route_section(
+ "Route", f"a{rid}", subnet_cfg_map[k], v
+ )
+ rid = rid + 1
elif k == "dns_nameservers" or k == "dns_search":
cfg.update_section(sec, subnet_cfg_map[k], " ".join(v))
@@ -280,8 +315,12 @@ class Renderer(renderer.Renderer):
dhcp = self.parse_subnets(iface, cfg)
self.parse_dns(iface, cfg, ns)
+ rid = 0
for route in ns.iter_routes():
- self.parse_routes(route, cfg)
+ # Use "c" as a dict key prefix for this route to isolate it
+ # from other sources of routes
+ self.parse_routes(f"c{rid}", route, cfg)
+ rid = rid + 1
if ns.version == 2:
name: Optional[str] = iface["name"]
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 453c0522..d4daa78f 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -27,10 +27,12 @@ KNOWN_DISTROS = [
"fedora",
"miraclelinux",
"openEuler",
+ "OpenCloudOS",
"openmandriva",
"rhel",
"rocky",
"suse",
+ "TencentOS",
"virtuozzo",
]
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
index 368ac861..4a3b6c84 100644
--- a/cloudinit/safeyaml.py
+++ b/cloudinit/safeyaml.py
@@ -25,6 +25,27 @@ class _CustomSafeLoader(yaml.SafeLoader):
return super().construct_scalar(node)
+def _fix_nested_map_index(new_key_path, marks):
+ new_marks = []
+ for mark in marks:
+ if "." not in mark.path:
+ new_marks.append(mark)
+ continue
+ path_prefix, _path_idx = mark.path.rsplit(".", 1)
+ if new_key_path not in mark.path and path_prefix in mark.path:
+ new_marks.append(
+ SchemaPathMarks(
+ # Replace only the first match of path_prefix
+ mark.path.replace(path_prefix, new_key_path, 1),
+ mark.start_mark,
+ mark.end_mark,
+ )
+ )
+ else:
+ new_marks.append(mark)
+ return new_marks
+
+
class _CustomSafeLoaderWithMarks(yaml.SafeLoader):
"""A loader which provides line and column start and end marks for YAML.
@@ -102,7 +123,30 @@ class _CustomSafeLoaderWithMarks(yaml.SafeLoader):
if line_num not in self.schemamarks_by_line:
self.schemamarks_by_line[line_num] = [marks]
else:
- self.schemamarks_by_line[line_num].append(marks)
+ if line_num == sequence_item.end_mark.line:
+ self.schemamarks_by_line[line_num].append(marks)
+ else: # Incorrect multi-line mapping or sequence object.
+ for inner_line in range(
+ line_num, sequence_item.end_mark.line
+ ):
+ if inner_line in self.schemamarks_by_line:
+ schema_marks = self.schemamarks_by_line[inner_line]
+ new_marks = _fix_nested_map_index(
+ node_key_path, schema_marks
+ )
+ if (
+ inner_line == line_num
+ and schema_marks[0].path != node_key_path
+ ):
+ new_marks.insert(
+ 0,
+ SchemaPathMarks(
+ node_key_path,
+ schema_marks[0].start_mark,
+ schema_marks[-1].end_mark,
+ ),
+ )
+ self.schemamarks_by_line[inner_line] = new_marks
return sequence
def get_single_data(self):
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 6804274e..58e8755f 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -18,6 +18,11 @@ class DataSourceAliYun(EC2.DataSourceEc2):
min_metadata_version = "2016-01-01"
extended_metadata_versions: List[str] = []
+ # Aliyun metadata server security enhanced mode overwrite
+ @property
+ def imdsv2_token_put_header(self):
+ return "X-aliyun-ecs-metadata-token"
+
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
hostname = self.metadata.get("hostname")
is_default = False
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index db9234db..b7d3e5a3 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -6,7 +6,6 @@
import base64
import crypt
-import functools
import os
import os.path
import re
@@ -16,13 +15,10 @@ from pathlib import Path
from time import sleep, time
from typing import Any, Dict, List, Optional
-import requests
-
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net, sources, ssh_util, subp, util
from cloudinit.event import EventScope, EventType
-from cloudinit.net import device_driver
from cloudinit.net.dhcp import (
NoDHCPLeaseError,
NoDHCPLeaseInterfaceError,
@@ -30,9 +26,9 @@ from cloudinit.net.dhcp import (
)
from cloudinit.net.ephemeral import EphemeralDHCPv4
from cloudinit.reporting import events
+from cloudinit.sources.azure import imds
from cloudinit.sources.helpers import netlink
from cloudinit.sources.helpers.azure import (
- DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE,
DEFAULT_WIRESERVER_ENDPOINT,
BrokenAzureDataSource,
ChassisAssetTag,
@@ -51,7 +47,7 @@ from cloudinit.sources.helpers.azure import (
report_diagnostic_event,
report_failure_to_fabric,
)
-from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
+from cloudinit.url_helper import UrlError
LOG = logging.getLogger(__name__)
@@ -65,31 +61,6 @@ DEFAULT_FS = "ext4"
AGENT_SEED_DIR = "/var/lib/waagent"
DEFAULT_PROVISIONING_ISO_DEV = "/dev/sr0"
-# In the event where the IMDS primary server is not
-# available, it takes 1s to fallback to the secondary one
-IMDS_TIMEOUT_IN_SECONDS = 2
-IMDS_URL = "http://169.254.169.254/metadata"
-IMDS_VER_MIN = "2019-06-01"
-IMDS_VER_WANT = "2021-08-01"
-IMDS_EXTENDED_VER_MIN = "2021-03-01"
-IMDS_RETRY_CODES = (
- 404, # not found (yet)
- 410, # gone / unavailable (yet)
- 429, # rate-limited/throttled
- 500, # server error
-)
-imds_readurl_exception_callback = functools.partial(
- retry_on_url_exc,
- retry_codes=IMDS_RETRY_CODES,
- retry_instances=(requests.Timeout,),
-)
-
-
-class MetadataType(Enum):
- ALL = "{}/instance".format(IMDS_URL)
- NETWORK = "{}/instance/network".format(IMDS_URL)
- REPROVISION_DATA = "{}/reprovisiondata".format(IMDS_URL)
-
class PPSType(Enum):
NONE = "None"
@@ -208,6 +179,33 @@ def get_hv_netvsc_macs_normalized() -> List[str]:
]
+@azure_ds_telemetry_reporter
+def determine_device_driver_for_mac(mac: str) -> Optional[str]:
+ """Determine the device driver to match on, if any."""
+ drivers = [
+ i[2]
+ for i in net.get_interfaces(blacklist_drivers=BLACKLIST_DRIVERS)
+ if mac == normalize_mac_address(i[1])
+ ]
+ if "hv_netvsc" in drivers:
+ return "hv_netvsc"
+
+ if len(drivers) == 1:
+ report_diagnostic_event(
+ "Assuming driver for interface with mac=%s drivers=%r"
+ % (mac, drivers),
+ logger_func=LOG.debug,
+ )
+ return drivers[0]
+
+ report_diagnostic_event(
+ "Unable to specify driver for interface with mac=%s drivers=%r"
+ % (mac, drivers),
+ logger_func=LOG.warning,
+ )
+ return None
+
+
def execute_or_debug(cmd, fail_ret=None) -> str:
try:
return subp.subp(cmd).stdout # pyright: ignore
@@ -502,11 +500,10 @@ class DataSourceAzure(sources.DataSource):
# it determines the value of ret. More specifically, the first one in
# the candidate list determines the path to take in order to get the
# metadata we need.
- ovf_is_accessible = False
- metadata_source = None
- md = {}
+ ovf_source = None
+ md = {"local-hostname": ""}
+ cfg = {"system_info": {"default_user": {"name": ""}}}
userdata_raw = ""
- cfg = {}
files = {}
for src in list_possible_azure_ds(self.seed_dir, ddir):
@@ -524,8 +521,12 @@ class DataSourceAzure(sources.DataSource):
self._iso_dev = src
else:
md, userdata_raw, cfg, files = load_azure_ds_dir(src)
- ovf_is_accessible = True
- metadata_source = src
+
+ ovf_source = src
+ report_diagnostic_event(
+ "Found provisioning metadata in %s" % ovf_source,
+ logger_func=LOG.debug,
+ )
break
except NonAzureDataSource:
report_diagnostic_event(
@@ -537,38 +538,36 @@ class DataSourceAzure(sources.DataSource):
report_diagnostic_event(
"%s was not mountable" % src, logger_func=LOG.debug
)
- md = {"local-hostname": ""}
- cfg = {"system_info": {"default_user": {"name": ""}}}
- metadata_source = "IMDS"
continue
except BrokenAzureDataSource as exc:
msg = "BrokenAzureDataSource: %s" % exc
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
-
- report_diagnostic_event(
- "Found provisioning metadata in %s" % metadata_source,
- logger_func=LOG.debug,
- )
+ else:
+ msg = (
+ "Unable to find provisioning media, falling back to IMDS "
+ "metadata. Be aware that IMDS metadata does not support "
+ "admin passwords or custom-data (user-data only)."
+ )
+ report_diagnostic_event(msg, logger_func=LOG.warning)
# If we read OVF from attached media, we are provisioning. If OVF
# is not found, we are probably provisioning on a system which does
# not have UDF support. In either case, require IMDS metadata.
# If we require IMDS metadata, try harder to obtain networking, waiting
# for at least 20 minutes. Otherwise only wait 5 minutes.
- requires_imds_metadata = bool(self._iso_dev) or not ovf_is_accessible
+ requires_imds_metadata = bool(self._iso_dev) or ovf_source is None
timeout_minutes = 20 if requires_imds_metadata else 5
try:
self._setup_ephemeral_networking(timeout_minutes=timeout_minutes)
except NoDHCPLeaseError:
pass
+ imds_md = {}
if self._is_ephemeral_networking_up():
- imds_md = self.get_imds_data_with_api_fallback(retries=10)
- else:
- imds_md = {}
+ imds_md = self.get_metadata_from_imds()
- if not imds_md and not ovf_is_accessible:
+ if not imds_md and ovf_source is None:
msg = "No OVF or IMDS available"
report_diagnostic_event(msg)
raise sources.InvalidMetaDataException(msg)
@@ -589,12 +588,12 @@ class DataSourceAzure(sources.DataSource):
md, userdata_raw, cfg, files = self._reprovision()
# fetch metadata again as it has changed after reprovisioning
- imds_md = self.get_imds_data_with_api_fallback(retries=10)
+ imds_md = self.get_metadata_from_imds()
# Report errors if IMDS network configuration is missing data.
self.validate_imds_network_metadata(imds_md=imds_md)
- self.seed = metadata_source
+ self.seed = ovf_source or "IMDS"
crawled_data.update(
{
"cfg": cfg,
@@ -621,7 +620,7 @@ class DataSourceAzure(sources.DataSource):
"disable_password"
] = imds_disable_password
- if metadata_source == "IMDS" and not crawled_data["files"]:
+ if self.seed == "IMDS" and not crawled_data["files"]:
try:
contents = build_minimal_ovf(
username=imds_username, # pyright: ignore
@@ -650,17 +649,7 @@ class DataSourceAzure(sources.DataSource):
"Bad userdata in IMDS", logger_func=LOG.warning
)
- if not metadata_source:
- msg = "No Azure metadata found"
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise sources.InvalidMetaDataException(msg)
- else:
- report_diagnostic_event(
- "found datasource in %s" % metadata_source,
- logger_func=LOG.debug,
- )
-
- if metadata_source == ddir:
+ if ovf_source == ddir:
report_diagnostic_event(
"using files cached in %s" % ddir, logger_func=LOG.debug
)
@@ -690,6 +679,17 @@ class DataSourceAzure(sources.DataSource):
return crawled_data
+ @azure_ds_telemetry_reporter
+ def get_metadata_from_imds(self) -> Dict:
+ try:
+ return imds.fetch_metadata_with_api_fallback()
+ except (UrlError, ValueError) as error:
+ report_diagnostic_event(
+ "Ignoring IMDS metadata due to: %s" % error,
+ logger_func=LOG.warning,
+ )
+ return {}
+
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached class attributes to defaults."""
super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
@@ -726,9 +726,7 @@ class DataSourceAzure(sources.DataSource):
report_diagnostic_event(
"Could not crawl Azure metadata: %s" % e, logger_func=LOG.error
)
- self._report_failure(
- description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
- )
+ self._report_failure()
return False
finally:
self._teardown_ephemeral_networking()
@@ -777,54 +775,6 @@ class DataSourceAzure(sources.DataSource):
)
return True
- @azure_ds_telemetry_reporter
- def get_imds_data_with_api_fallback(
- self,
- *,
- retries: int,
- md_type: MetadataType = MetadataType.ALL,
- exc_cb=imds_readurl_exception_callback,
- infinite: bool = False,
- ) -> dict:
- """Fetch metadata from IMDS using IMDS_VER_WANT API version.
-
- Falls back to IMDS_VER_MIN version if IMDS returns a 400 error code,
- indicating that IMDS_VER_WANT is unsupported.
-
- :return: Parsed metadata dictionary or empty dict on error.
- """
- LOG.info("Attempting IMDS api-version: %s", IMDS_VER_WANT)
- try:
- return get_metadata_from_imds(
- retries=retries,
- md_type=md_type,
- api_version=IMDS_VER_WANT,
- exc_cb=exc_cb,
- infinite=infinite,
- )
- except UrlError as error:
- LOG.info("UrlError with IMDS api-version: %s", IMDS_VER_WANT)
- # Fall back if HTTP code is 400, otherwise return empty dict.
- if error.code != 400:
- return {}
-
- log_msg = "Fall back to IMDS api-version: {}".format(IMDS_VER_MIN)
- report_diagnostic_event(log_msg, logger_func=LOG.info)
- try:
- return get_metadata_from_imds(
- retries=retries,
- md_type=md_type,
- api_version=IMDS_VER_MIN,
- exc_cb=exc_cb,
- infinite=infinite,
- )
- except UrlError as error:
- report_diagnostic_event(
- "Failed to fetch IMDS metadata: %s" % error,
- logger_func=LOG.error,
- )
- return {}
-
def get_instance_id(self):
if not self.metadata or "instance-id" not in self.metadata:
return self._iid()
@@ -1034,82 +984,18 @@ class DataSourceAzure(sources.DataSource):
primary nic, then we also get the expected total nic count from IMDS.
IMDS will process the request and send a response only for primary NIC.
"""
- is_primary = False
- expected_nic_count = -1
- imds_md = None
- metadata_poll_count = 0
- metadata_logging_threshold = 1
- expected_errors_count = 0
-
# For now, only a VM's primary NIC can contact IMDS and WireServer. If
# DHCP fails for a NIC, we have no mechanism to determine if the NIC is
# primary or secondary. In this case, retry DHCP until successful.
self._setup_ephemeral_networking(iface=ifname, timeout_minutes=20)
- # Retry polling network metadata for a limited duration only when the
- # calls fail due to network unreachable error or timeout.
- # This is because the platform drops packets going towards IMDS
- # when it is not a primary nic. If the calls fail due to other issues
- # like 410, 503 etc, then it means we are primary but IMDS service
- # is unavailable at the moment. Retry indefinitely in those cases
- # since we cannot move on without the network metadata. In the future,
- # all this will not be necessary, as a new dhcp option would tell
- # whether the nic is primary or not.
- def network_metadata_exc_cb(msg, exc):
- nonlocal expected_errors_count, metadata_poll_count
- nonlocal metadata_logging_threshold
-
- metadata_poll_count = metadata_poll_count + 1
-
- # Log when needed but back off exponentially to avoid exploding
- # the log file.
- if metadata_poll_count >= metadata_logging_threshold:
- metadata_logging_threshold *= 2
- report_diagnostic_event(
- "Ran into exception when attempting to reach %s "
- "after %d polls." % (msg, metadata_poll_count),
- logger_func=LOG.error,
- )
-
- if isinstance(exc, UrlError):
- report_diagnostic_event(
- "poll IMDS with %s failed. Exception: %s and code: %s"
- % (msg, exc.cause, exc.code),
- logger_func=LOG.error,
- )
-
- # Retry up to a certain limit for both timeout and network
- # unreachable errors.
- if exc.cause and isinstance(
- exc.cause, (requests.Timeout, requests.ConnectionError)
- ):
- expected_errors_count = expected_errors_count + 1
- return expected_errors_count <= 10
- return True
-
# Primary nic detection will be optimized in the future. The fact that
# primary nic is being attached first helps here. Otherwise each nic
# could add several seconds of delay.
- try:
- imds_md = self.get_imds_data_with_api_fallback(
- retries=0,
- md_type=MetadataType.NETWORK,
- exc_cb=network_metadata_exc_cb,
- infinite=True,
- )
- except Exception as e:
- LOG.warning(
- "Failed to get network metadata using nic %s. Attempt to "
- "contact IMDS failed with error %s. Assuming this is not the "
- "primary nic.",
- ifname,
- e,
- )
-
+ imds_md = self.get_metadata_from_imds()
if imds_md:
# Only primary NIC will get a response from IMDS.
LOG.info("%s is the primary nic", ifname)
- is_primary = True
# Set the expected nic count based on the response received.
expected_nic_count = len(imds_md["interface"])
@@ -1117,11 +1003,16 @@ class DataSourceAzure(sources.DataSource):
"Expected nic count: %d" % expected_nic_count,
logger_func=LOG.info,
)
- else:
- # If we are not the primary nic, then clean the dhcp context.
- self._teardown_ephemeral_networking()
+ return True, expected_nic_count
- return is_primary, expected_nic_count
+ # If we are not the primary nic, then clean the dhcp context.
+ LOG.warning(
+ "Failed to fetch IMDS metadata using nic %s. "
+ "Assuming this is not the primary nic.",
+ ifname,
+ )
+ self._teardown_ephemeral_networking()
+ return False, -1
@azure_ds_telemetry_reporter
def _wait_for_hot_attached_primary_nic(self, nl_sock):
@@ -1211,54 +1102,11 @@ class DataSourceAzure(sources.DataSource):
def _poll_imds(self):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
- url = "{}?api-version={}".format(
- MetadataType.REPROVISION_DATA.value, IMDS_VER_MIN
- )
- headers = {"Metadata": "true"}
nl_sock = None
report_ready = bool(
not os.path.isfile(self._reported_ready_marker_file)
)
- self.imds_logging_threshold = 1
- self.imds_poll_counter = 1
dhcp_attempts = 0
- reprovision_data = None
-
- def exc_cb(msg, exception):
- if isinstance(exception, UrlError):
- if exception.code in (404, 410):
- if self.imds_poll_counter == self.imds_logging_threshold:
- # Reducing the logging frequency as we are polling IMDS
- self.imds_logging_threshold *= 2
- LOG.debug(
- "Backing off logging threshold for the same "
- "exception to %d",
- self.imds_logging_threshold,
- )
- report_diagnostic_event(
- "poll IMDS with %s failed. "
- "Exception: %s and code: %s"
- % (msg, exception.cause, exception.code),
- logger_func=LOG.debug,
- )
- self.imds_poll_counter += 1
- return True
- else:
- # If we get an exception while trying to call IMDS, we call
- # DHCP and setup the ephemeral network to acquire a new IP.
- report_diagnostic_event(
- "poll IMDS with %s failed. Exception: %s and code: %s"
- % (msg, exception.cause, exception.code),
- logger_func=LOG.warning,
- )
- return False
-
- report_diagnostic_event(
- "poll IMDS failed with an unexpected exception: %s"
- % exception,
- logger_func=LOG.warning,
- )
- return False
if report_ready:
# Networking must be up for netlink to detect
@@ -1320,6 +1168,7 @@ class DataSourceAzure(sources.DataSource):
# Teardown old network configuration.
self._teardown_ephemeral_networking()
+ reprovision_data = None
while not reprovision_data:
if not self._is_ephemeral_networking_up():
dhcp_attempts += 1
@@ -1334,14 +1183,7 @@ class DataSourceAzure(sources.DataSource):
parent=azure_ds_reporter,
):
try:
- reprovision_data = readurl(
- url,
- timeout=IMDS_TIMEOUT_IN_SECONDS,
- headers=headers,
- exception_cb=exc_cb,
- infinite=True,
- log_req_resp=False,
- ).contents
+ reprovision_data = imds.fetch_reprovision_data()
except UrlError:
self._teardown_ephemeral_networking()
continue
@@ -1350,15 +1192,11 @@ class DataSourceAzure(sources.DataSource):
"attempted dhcp %d times after reuse" % dhcp_attempts,
logger_func=LOG.debug,
)
- report_diagnostic_event(
- "polled imds %d times after reuse" % self.imds_poll_counter,
- logger_func=LOG.debug,
- )
return reprovision_data
@azure_ds_telemetry_reporter
- def _report_failure(self, description: Optional[str] = None) -> bool:
+ def _report_failure(self) -> bool:
"""Tells the Azure fabric that provisioning has failed.
@param description: A description of the error encountered.
@@ -1371,10 +1209,7 @@ class DataSourceAzure(sources.DataSource):
"to report failure to Azure",
logger_func=LOG.debug,
)
- report_failure_to_fabric(
- endpoint=self._wireserver_endpoint,
- description=description,
- )
+ report_failure_to_fabric(endpoint=self._wireserver_endpoint)
return True
except Exception as e:
report_diagnostic_event(
@@ -1394,9 +1229,7 @@ class DataSourceAzure(sources.DataSource):
except NoDHCPLeaseError:
# Reporting failure will fail, but it will emit telemetry.
pass
- report_failure_to_fabric(
- endpoint=self._wireserver_endpoint, description=description
- )
+ report_failure_to_fabric(endpoint=self._wireserver_endpoint)
return True
except Exception as e:
report_diagnostic_event(
@@ -2046,11 +1879,8 @@ def generate_network_config_from_instance_network_metadata(
dev_config.update(
{"match": {"macaddress": mac.lower()}, "set-name": nicname}
)
- # With netvsc, we can get two interfaces that
- # share the same MAC, so we need to make sure
- # our match condition also contains the driver
- driver = device_driver(nicname)
- if driver and driver == "hv_netvsc":
+ driver = determine_device_driver_for_mac(mac)
+ if driver:
dev_config["match"]["driver"] = driver
netconfig["ethernets"][nicname] = dev_config
continue
@@ -2079,96 +1909,6 @@ def _generate_network_config_from_fallback_config() -> dict:
@azure_ds_telemetry_reporter
-def get_metadata_from_imds(
- retries,
- md_type=MetadataType.ALL,
- api_version=IMDS_VER_MIN,
- exc_cb=imds_readurl_exception_callback,
- infinite=False,
-):
- """Query Azure's instance metadata service, returning a dictionary.
-
- For more info on IMDS:
- https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
-
- @param retries: The number of retries of the IMDS_URL.
- @param md_type: Metadata type for IMDS request.
- @param api_version: IMDS api-version to use in the request.
-
- @return: A dict of instance metadata containing compute and network
- info.
- """
- kwargs = {
- "logfunc": LOG.debug,
- "msg": "Crawl of Azure Instance Metadata Service (IMDS)",
- "func": _get_metadata_from_imds,
- "args": (retries, exc_cb, md_type, api_version, infinite),
- }
- try:
- return util.log_time(**kwargs)
- except Exception as e:
- report_diagnostic_event(
- "exception while getting metadata: %s" % e,
- logger_func=LOG.warning,
- )
- raise
-
-
-@azure_ds_telemetry_reporter
-def _get_metadata_from_imds(
- retries,
- exc_cb,
- md_type=MetadataType.ALL,
- api_version=IMDS_VER_MIN,
- infinite=False,
-):
- url = "{}?api-version={}".format(md_type.value, api_version)
- headers = {"Metadata": "true"}
-
- # support for extended metadata begins with 2021-03-01
- if api_version >= IMDS_EXTENDED_VER_MIN and md_type == MetadataType.ALL:
- url = url + "&extended=true"
-
- try:
- response = readurl(
- url,
- timeout=IMDS_TIMEOUT_IN_SECONDS,
- headers=headers,
- retries=retries,
- exception_cb=exc_cb,
- infinite=infinite,
- )
- except Exception as e:
- # pylint:disable=no-member
- if isinstance(e, UrlError) and e.code == 400:
- raise
- else:
- report_diagnostic_event(
- "Ignoring IMDS instance metadata. "
- "Get metadata from IMDS failed: %s" % e,
- logger_func=LOG.warning,
- )
- return {}
- try:
- from json.decoder import JSONDecodeError
-
- json_decode_error = JSONDecodeError
- except ImportError:
- json_decode_error = ValueError
-
- try:
- return util.load_json(response.contents)
- except json_decode_error as e:
- report_diagnostic_event(
- "Ignoring non-json IMDS instance metadata response: %s. "
- "Loading non-json IMDS response failed: %s"
- % (response.contents, e),
- logger_func=LOG.warning,
- )
- return {}
-
-
-@azure_ds_telemetry_reporter
def maybe_remove_ubuntu_network_config_scripts(paths=None):
"""Remove Azure-specific ubuntu network config for non-primary nics.
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 139ec7e4..44665b26 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -30,12 +30,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
-API_TOKEN_ROUTE = "latest/api/token"
-AWS_TOKEN_TTL_SECONDS = "21600"
-AWS_TOKEN_PUT_HEADER = "X-aws-ec2-metadata-token"
-AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + "-ttl-seconds"
-AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
-
class CloudNames:
ALIYUN = "aliyun"
@@ -57,6 +51,10 @@ def skip_404_tag_errors(exception):
return exception.code == 404 and "meta-data/tags/" in exception.url
+# Cloud platforms that support IMDSv2 style metadata server
+IDMSV2_SUPPORTED_CLOUD_PLATFORMS = [CloudNames.AWS, CloudNames.ALIYUN]
+
+
class DataSourceEc2(sources.DataSource):
dsname = "Ec2"
@@ -192,6 +190,27 @@ class DataSourceEc2(sources.DataSource):
self._platform_type = DataSourceEc2.dsname.lower()
return self._platform_type
+    # IMDSv2-related parameters from the EC2 metadata API documentation
+ @property
+ def api_token_route(self):
+ return "latest/api/token"
+
+ @property
+ def imdsv2_token_ttl_seconds(self):
+ return "21600"
+
+ @property
+ def imdsv2_token_put_header(self):
+ return "X-aws-ec2-metadata-token"
+
+ @property
+ def imdsv2_token_req_header(self):
+ return self.imdsv2_token_put_header + "-ttl-seconds"
+
+ @property
+ def imdsv2_token_redact(self):
+ return [self.imdsv2_token_put_header, self.imdsv2_token_req_header]
+
def get_metadata_api_version(self):
"""Get the best supported api version from the metadata service.
@@ -208,7 +227,9 @@ class DataSourceEc2(sources.DataSource):
url = url_tmpl.format(self.metadata_address, api_ver)
try:
resp = uhelp.readurl(
- url=url, headers=headers, headers_redact=AWS_TOKEN_REDACT
+ url=url,
+ headers=headers,
+ headers_redact=self.imdsv2_token_redact,
)
except uhelp.UrlError as e:
LOG.debug("url %s raised exception %s", url, e)
@@ -232,7 +253,7 @@ class DataSourceEc2(sources.DataSource):
api_version,
self.metadata_address,
headers_cb=self._get_headers,
- headers_redact=AWS_TOKEN_REDACT,
+ headers_redact=self.imdsv2_token_redact,
exception_cb=self._refresh_stale_aws_token_cb,
).get("document", {})
return self.identity.get(
@@ -248,12 +269,12 @@ class DataSourceEc2(sources.DataSource):
the instance owner has disabled the IMDS HTTP endpoint or
the network topology conflicts with the configured hop-limit.
"""
- if self.cloud_name != CloudNames.AWS:
+ if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS:
return
urls = []
url2base = {}
- url_path = API_TOKEN_ROUTE
+ url_path = self.api_token_route
request_method = "PUT"
for url in mdurls:
cur = "{0}/{1}".format(url, url_path)
@@ -275,7 +296,7 @@ class DataSourceEc2(sources.DataSource):
headers_cb=self._get_headers,
exception_cb=self._imds_exception_cb,
request_method=request_method,
- headers_redact=AWS_TOKEN_REDACT,
+ headers_redact=self.imdsv2_token_redact,
connect_synchronously=False,
)
except uhelp.UrlError:
@@ -320,7 +341,10 @@ class DataSourceEc2(sources.DataSource):
# If we could not get an API token, then we assume the IMDS
# endpoint was disabled and we move on without a data source.
# Fallback to IMDSv1 if not running on EC2
- if not metadata_address and self.cloud_name != CloudNames.AWS:
+ if (
+ not metadata_address
+ and self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS
+ ):
# if we can't get a token, use instance-id path
urls = []
url2base = {}
@@ -339,7 +363,7 @@ class DataSourceEc2(sources.DataSource):
max_wait=url_params.max_wait_seconds,
timeout=url_params.timeout_seconds,
status_cb=LOG.warning,
- headers_redact=AWS_TOKEN_REDACT,
+ headers_redact=self.imdsv2_token_redact,
headers_cb=self._get_headers,
request_method=request_method,
)
@@ -350,7 +374,7 @@ class DataSourceEc2(sources.DataSource):
if metadata_address:
self.metadata_address = metadata_address
LOG.debug("Using metadata source: '%s'", self.metadata_address)
- elif self.cloud_name == CloudNames.AWS:
+ elif self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS:
LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
LOG.critical(
@@ -531,9 +555,9 @@ class DataSourceEc2(sources.DataSource):
if not self.wait_for_metadata_service():
return {}
api_version = self.get_metadata_api_version()
- redact = AWS_TOKEN_REDACT
+ redact = self.imdsv2_token_redact
crawled_metadata = {}
- if self.cloud_name == CloudNames.AWS:
+ if self.cloud_name in IDMSV2_SUPPORTED_CLOUD_PLATFORMS:
exc_cb = self._refresh_stale_aws_token_cb
exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb
skip_cb = None
@@ -577,22 +601,26 @@ class DataSourceEc2(sources.DataSource):
crawled_metadata["_metadata_api_version"] = api_version
return crawled_metadata
- def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS):
+ def _refresh_api_token(self, seconds=None):
"""Request new metadata API token.
@param seconds: The lifetime of the token in seconds
@return: The API token or None if unavailable.
"""
- if self.cloud_name != CloudNames.AWS:
+ if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS:
return None
+
+ if seconds is None:
+ seconds = self.imdsv2_token_ttl_seconds
+
LOG.debug("Refreshing Ec2 metadata API token")
- request_header = {AWS_TOKEN_REQ_HEADER: seconds}
- token_url = "{}/{}".format(self.metadata_address, API_TOKEN_ROUTE)
+ request_header = {self.imdsv2_token_req_header: seconds}
+ token_url = "{}/{}".format(self.metadata_address, self.api_token_route)
try:
response = uhelp.readurl(
token_url,
headers=request_header,
- headers_redact=AWS_TOKEN_REDACT,
+ headers_redact=self.imdsv2_token_redact,
request_method="PUT",
)
except uhelp.UrlError as e:
@@ -653,20 +681,22 @@ class DataSourceEc2(sources.DataSource):
If _api_token is unset on AWS, attempt to refresh the token via a PUT
and then return the updated token header.
"""
- if self.cloud_name != CloudNames.AWS:
+ if self.cloud_name not in IDMSV2_SUPPORTED_CLOUD_PLATFORMS:
return {}
- # Request a 6 hour token if URL is API_TOKEN_ROUTE
- request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS}
- if API_TOKEN_ROUTE in url:
+ # Request a 6 hour token if URL is api_token_route
+ request_token_header = {
+ self.imdsv2_token_req_header: self.imdsv2_token_ttl_seconds
+ }
+ if self.api_token_route in url:
return request_token_header
if not self._api_token:
# If we don't yet have an API token, get one via a PUT against
- # API_TOKEN_ROUTE. This _api_token may get unset by a 403 due
+ # api_token_route. This _api_token may get unset by a 403 due
# to an invalid or expired token
self._api_token = self._refresh_api_token()
if not self._api_token:
return {}
- return {AWS_TOKEN_PUT_HEADER: self._api_token}
+ return {self.imdsv2_token_put_header: self._api_token}
class DataSourceEc2Local(DataSourceEc2):
@@ -746,6 +776,11 @@ def warn_if_necessary(cfgval, cfg):
warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep)
+def identify_aliyun(data):
+ if data["product_name"] == "Alibaba Cloud ECS":
+ return CloudNames.ALIYUN
+
+
def identify_aws(data):
# data is a dictionary returned by _collect_platform_data.
if data["uuid"].startswith("ec2") and (
@@ -788,6 +823,7 @@ def identify_platform():
identify_zstack,
identify_e24cloud,
identify_outscale,
+ identify_aliyun,
lambda x: CloudNames.UNKNOWN,
)
for checker in checks:
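
The properties added above only parameterize the standard IMDSv2 token handshake so that Aliyun can reuse it. A standalone sketch of that handshake, using `requests` directly instead of cloud-init's url helpers and assuming the usual 169.254.169.254 endpoint:

    import requests

    IMDS_BASE = "http://169.254.169.254"

    def fetch_imdsv2(path: str) -> str:
        # Step 1: PUT a token request with the desired TTL (6 hours).
        token = requests.put(
            IMDS_BASE + "/latest/api/token",
            headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
            timeout=2,
        ).text
        # Step 2: GET metadata with the token header; both headers are
        # redacted from logs by the datasource above.
        return requests.get(
            IMDS_BASE + "/" + path,
            headers={"X-aws-ec2-metadata-token": token},
            timeout=2,
        ).text

For example, fetch_imdsv2("latest/meta-data/instance-id") would return the instance id on a platform that serves this API.
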
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index 49a7567c..ab440cc8 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -11,6 +11,7 @@ Notes:
import os
import socket
import stat
+import time
from enum import Flag, auto
from json.decoder import JSONDecodeError
from typing import Any, Dict, List, Optional, Union, cast
@@ -277,6 +278,14 @@ def _get_json_response(
session: requests.Session, url: str, do_raise: bool = True
):
url_response = _do_request(session, url, do_raise)
+ if not url_response.ok:
+ LOG.debug(
+ "Skipping %s on [HTTP:%d]:%s",
+ url,
+ url_response.status_code,
+ url_response.text,
+ )
+ return {}
try:
return url_response.json()
except JSONDecodeError as exc:
@@ -291,7 +300,20 @@ def _get_json_response(
def _do_request(
session: requests.Session, url: str, do_raise: bool = True
) -> requests.Response:
- response = session.get(url)
+ for retries in range(30, 0, -1):
+ response = session.get(url)
+ if 500 == response.status_code:
+                # retry every 0.1 seconds, for up to 3 seconds, on a 500 error
+                # this is evil, but it also works around a bug
+ time.sleep(0.1)
+ LOG.warning(
+ "[GET] [HTTP:%d] %s, retrying %d more time(s)",
+ response.status_code,
+ url,
+ retries,
+ )
+ else:
+ break
LOG.debug("[GET] [HTTP:%d] %s", response.status_code, url)
if do_raise and not response.ok:
raise sources.InvalidMetaDataException(
@@ -386,7 +408,9 @@ class _MetaDataReader:
md.update(self._process_config(session))
if MetaDataKeys.DEVICES in metadata_keys:
url = url_helper.combine_url(self._version_url, "devices")
- md["devices"] = _get_json_response(session, url)
+ devices = _get_json_response(session, url, do_raise=False)
+ if devices:
+ md["devices"] = devices
return md
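
The loop added to `_do_request` above amounts to a small bounded-retry helper. A sketch under the same assumptions (30 attempts, 0.1 s apart, only HTTP 500 is retried), written standalone rather than as the datasource's own method:

    import time
    import requests

    def get_with_500_retry(
        session: requests.Session, url: str, attempts: int = 30, delay: float = 0.1
    ) -> requests.Response:
        # Retry transient 500s for roughly attempts * delay seconds,
        # returning the last response either way.
        response = session.get(url)
        for _ in range(attempts - 1):
            if response.status_code != 500:
                break
            time.sleep(delay)
            response = session.get(url)
        return response
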
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index fba6aaae..a32bd4d0 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -156,7 +156,7 @@ class DataSourceNoCloud(sources.DataSource):
# The special argument "seedfrom" indicates we should
# attempt to seed the userdata / metadata from its value
            # its primary value is in allowing the user to type less
- # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
+ # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg/
if "seedfrom" in mydata["meta-data"]:
seedfrom = mydata["meta-data"]["seedfrom"]
seedfound = False
@@ -167,6 +167,9 @@ class DataSourceNoCloud(sources.DataSource):
if not seedfound:
LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
+ # check and replace instances of known dmi.<dmi_keys> such as
+ # chassis-serial-number or baseboard-product-name
+ seedfrom = dmi.sub_dmi_vars(seedfrom)
# This could throw errors, but the user told us to do it
# so if errors are raised, let them raise
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 05bf84c2..7baef3a5 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -11,49 +11,13 @@
import base64
import os
import re
-import time
from xml.dom import minidom
-from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import safeyaml, sources, subp, util
-from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
- PostCustomScript,
- PreCustomScript,
-)
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
-from cloudinit.sources.helpers.vmware.imc.config_passwd import (
- PasswordConfigurator,
-)
-from cloudinit.sources.helpers.vmware.imc.guestcust_error import (
- GuestCustErrorEnum,
-)
-from cloudinit.sources.helpers.vmware.imc.guestcust_event import (
- GuestCustEventEnum as GuestCustEvent,
-)
-from cloudinit.sources.helpers.vmware.imc.guestcust_state import (
- GuestCustStateEnum,
-)
-from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
- enable_nics,
- get_nics_to_enable,
- get_tools_config,
- set_customization_status,
- set_gc_status,
-)
LOG = logging.getLogger(__name__)
-CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
-GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
-VMWARE_IMC_DIR = "/var/run/vmware-imc"
-
-
-class GuestCustScriptDisabled(Exception):
- pass
-
class DataSourceOVF(sources.DataSource):
@@ -66,11 +30,7 @@ class DataSourceOVF(sources.DataSource):
self.environment = None
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
- self.vmware_customization_supported = True
self._network_config = None
- self._vmware_nics_to_enable = None
- self._vmware_cust_conf = None
- self._vmware_cust_found = False
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -81,8 +41,6 @@ class DataSourceOVF(sources.DataSource):
md = {}
ud = ""
vd = ""
- vmwareImcConfigFilePath = None
- nicspath = None
defaults = {
"instance-id": "iid-dsovf",
@@ -90,305 +48,12 @@ class DataSourceOVF(sources.DataSource):
(seedfile, contents) = get_ovf_env(self.paths.seed_dir)
- system_type = dmi.read_dmi_data("system-product-name")
- if system_type is None:
- LOG.debug("No system-product-name found")
-
if seedfile:
# Found a seed dir
seed = os.path.join(self.paths.seed_dir, seedfile)
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
- elif system_type and "vmware" in system_type.lower():
- LOG.debug("VMware Virtualization Platform found")
- allow_vmware_cust = False
- allow_raw_data = False
- if not self.vmware_customization_supported:
- LOG.debug(
- "Skipping the check for VMware Customization support"
- )
- else:
- allow_vmware_cust = not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True
- )
- allow_raw_data = util.get_cfg_option_bool(
- self.ds_cfg, "allow_raw_data", True
- )
-
- if not (allow_vmware_cust or allow_raw_data):
- LOG.debug("Customization for VMware platform is disabled.")
- else:
- search_paths = (
- "/usr/lib/vmware-tools",
- "/usr/lib64/vmware-tools",
- "/usr/lib/open-vm-tools",
- "/usr/lib64/open-vm-tools",
- "/usr/lib/x86_64-linux-gnu/open-vm-tools",
- "/usr/lib/aarch64-linux-gnu/open-vm-tools",
- )
-
- plugin = "libdeployPkgPlugin.so"
- deployPkgPluginPath = None
- for path in search_paths:
- deployPkgPluginPath = search_file(path, plugin)
- if deployPkgPluginPath:
- LOG.debug(
- "Found the customization plugin at %s",
- deployPkgPluginPath,
- )
- break
-
- if deployPkgPluginPath:
- # When the VM is powered on, the "VMware Tools" daemon
- # copies the customization specification file to
- # /var/run/vmware-imc directory. cloud-init code needs
- # to search for the file in that directory which indicates
- # that required metadata and userdata files are now
- # present.
- max_wait = get_max_wait_from_cfg(self.ds_cfg)
- vmwareImcConfigFilePath = util.log_time(
- logfunc=LOG.debug,
- msg="waiting for configuration file",
- func=wait_for_imc_cfg_file,
- args=("cust.cfg", max_wait),
- )
- else:
- LOG.debug("Did not find the customization plugin.")
-
- md_path = None
- if vmwareImcConfigFilePath:
- imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
- cf = ConfigFile(vmwareImcConfigFilePath)
- self._vmware_cust_conf = Config(cf)
- LOG.debug(
- "Found VMware Customization Config File at %s",
- vmwareImcConfigFilePath,
- )
- try:
- (md_path, ud_path, nicspath) = collect_imc_file_paths(
- self._vmware_cust_conf
- )
- except FileNotFoundError as e:
- _raise_error_status(
- "File(s) missing in directory",
- e,
- GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
- # Don't handle the customization for below 2 cases:
- # 1. meta data is found, allow_raw_data is False.
- # 2. no meta data is found, allow_vmware_cust is False.
- if md_path and not allow_raw_data:
- LOG.debug("Customization using raw data is disabled.")
- # reset vmwareImcConfigFilePath to None to avoid
- # customization for VMware platform
- vmwareImcConfigFilePath = None
- if md_path is None and not allow_vmware_cust:
- LOG.debug(
- "Customization using VMware config is disabled."
- )
- vmwareImcConfigFilePath = None
- else:
- LOG.debug("Did not find VMware Customization Config File")
-
- use_raw_data = bool(vmwareImcConfigFilePath and md_path)
- if use_raw_data:
- set_gc_status(self._vmware_cust_conf, "Started")
- LOG.debug("Start to load cloud-init meta data and user data")
- try:
- (md, ud, cfg, network) = load_cloudinit_data(md_path, ud_path)
-
- if network:
- self._network_config = network
- else:
- self._network_config = (
- self.distro.generate_fallback_config()
- )
-
- except safeyaml.YAMLError as e:
- _raise_error_status(
- "Error parsing the cloud-init meta data",
- e,
- GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
- except Exception as e:
- _raise_error_status(
- "Error loading cloud-init configuration",
- e,
- GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
-
- self._vmware_cust_found = True
- found.append("vmware-tools")
-
- util.del_dir(imcdirpath)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
- )
- set_gc_status(self._vmware_cust_conf, "Successful")
-
- elif vmwareImcConfigFilePath:
- # Load configuration from vmware_imc
- self._vmware_nics_to_enable = ""
- try:
- set_gc_status(self._vmware_cust_conf, "Started")
-
- (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
- self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
- product_marker = self._vmware_cust_conf.marker_id
- hasmarkerfile = check_marker_exists(
- product_marker, os.path.join(self.paths.cloud_dir, "data")
- )
- special_customization = product_marker and not hasmarkerfile
- customscript = self._vmware_cust_conf.custom_script_name
-
- # In case there is a custom script, check whether VMware
- # Tools configuration allow the custom script to run.
- if special_customization and customscript:
- defVal = "false"
- if self._vmware_cust_conf.default_run_post_script:
- LOG.debug(
- "Set default value to true due to"
- " customization configuration."
- )
- defVal = "true"
-
- custScriptConfig = get_tools_config(
- CONFGROUPNAME_GUESTCUSTOMIZATION,
- GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
- defVal,
- )
- if custScriptConfig.lower() != "true":
- # Update the customization status if custom script
- # is disabled
- msg = "Custom script is disabled by VM Administrator"
- LOG.debug(msg)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
- )
- raise GuestCustScriptDisabled(msg)
-
- ccScriptsDir = os.path.join(
- self.paths.get_cpath("scripts"), "per-instance"
- )
- except GuestCustScriptDisabled as e:
- LOG.debug("GuestCustScriptDisabled")
- _raise_error_status(
- "Error parsing the customization Config File",
- e,
- GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
- except Exception as e:
- _raise_error_status(
- "Error parsing the customization Config File",
- e,
- GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
-
- if special_customization:
- if customscript:
- try:
- precust = PreCustomScript(customscript, imcdirpath)
- precust.execute()
- except Exception as e:
- _raise_error_status(
- "Error executing pre-customization script",
- e,
- GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
-
- try:
- LOG.debug("Preparing the Network configuration")
- self._network_config = get_network_config_from_conf(
- self._vmware_cust_conf, True, True, self.distro.osfamily
- )
- except Exception as e:
- _raise_error_status(
- "Error preparing Network Configuration",
- e,
- GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
-
- if special_customization:
- LOG.debug("Applying password customization")
- pwdConfigurator = PasswordConfigurator()
- adminpwd = self._vmware_cust_conf.admin_password
- try:
- resetpwd = self._vmware_cust_conf.reset_password
- if adminpwd or resetpwd:
- pwdConfigurator.configure(
- adminpwd, resetpwd, self.distro
- )
- else:
- LOG.debug("Changing password is not needed")
- except Exception as e:
- _raise_error_status(
- "Error applying Password Configuration",
- e,
- GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
-
- if customscript:
- try:
- postcust = PostCustomScript(
- customscript, imcdirpath, ccScriptsDir
- )
- postcust.execute()
- except Exception as e:
- _raise_error_status(
- "Error executing post-customization script",
- e,
- GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
-
- if product_marker:
- try:
- setup_marker_files(
- product_marker,
- os.path.join(self.paths.cloud_dir, "data"),
- )
- except Exception as e:
- _raise_error_status(
- "Error creating marker files",
- e,
- GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath,
- self._vmware_cust_conf,
- )
-
- self._vmware_cust_found = True
- found.append("vmware-tools")
-
- # TODO: Need to set the status to DONE only when the
- # customization is done successfully.
- util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
- enable_nics(self._vmware_nics_to_enable)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
- )
- set_gc_status(self._vmware_cust_conf, "Successful")
-
else:
np = [
("com.vmware.guestInfo", transport_vmware_guestinfo),
@@ -438,9 +103,6 @@ class DataSourceOVF(sources.DataSource):
return True
def _get_subplatform(self):
- system_type = dmi.read_dmi_data("system-product-name").lower()
- if system_type == "vmware":
- return "vmware (%s)" % self.seed
return "ovf (%s)" % self.seed
def get_public_ssh_keys(self):
@@ -468,94 +130,6 @@ class DataSourceOVFNet(DataSourceOVF):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, "ovf-net")
self.supported_seed_starts = ("http://", "https://")
- self.vmware_customization_supported = False
-
-
-def get_max_wait_from_cfg(cfg):
- default_max_wait = 15
- max_wait_cfg_option = "vmware_cust_file_max_wait"
- max_wait = default_max_wait
-
- if not cfg:
- return max_wait
-
- try:
- max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
- except ValueError:
- LOG.warning(
- "Failed to get '%s', using %s",
- max_wait_cfg_option,
- default_max_wait,
- )
-
- if max_wait < 0:
- LOG.warning(
- "Invalid value '%s' for '%s', using '%s' instead",
- max_wait,
- max_wait_cfg_option,
- default_max_wait,
- )
- max_wait = default_max_wait
-
- return max_wait
-
-
-def wait_for_imc_cfg_file(
- filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc"
-):
- waited = 0
- if maxwait <= naplen:
- naplen = 1
-
- while waited < maxwait:
- fileFullPath = os.path.join(dirpath, filename)
- if os.path.isfile(fileFullPath):
- return fileFullPath
- LOG.debug("Waiting for VMware Customization Config File")
- time.sleep(naplen)
- waited += naplen
- return None
-
-
-def get_network_config_from_conf(
- config, use_system_devices=True, configure=False, osfamily=None
-):
- nicConfigurator = NicConfigurator(config.nics, use_system_devices)
- nics_cfg_list = nicConfigurator.generate(configure, osfamily)
-
- return get_network_config(
- nics_cfg_list, config.name_servers, config.dns_suffixes
- )
-
-
-def get_network_config(nics=None, nameservers=None, search=None):
- config_list = nics
-
- if nameservers or search:
- config_list.append(
- {"type": "nameserver", "address": nameservers, "search": search}
- )
-
- return {"version": 1, "config": config_list}
-
-
-# This will return a dict with some content
-# meta-data, user-data, some config
-def read_vmware_imc(config):
- md = {}
- cfg = {}
- ud = None
- if config.host_name:
- if config.domain_name:
- md["local-hostname"] = config.host_name + "." + config.domain_name
- else:
- md["local-hostname"] = config.host_name
-
- if config.timezone:
- cfg["timezone"] = config.timezone
-
- md["instance-id"] = "iid-vmware-imc"
- return (md, ud, cfg)
# This will return a dict with some content
@@ -745,17 +319,6 @@ def get_properties(contents):
return props
-def search_file(dirpath, filename):
- if not dirpath or not filename:
- return None
-
- for root, _dirs, files in os.walk(dirpath):
- if filename in files:
- return os.path.join(root, filename)
-
- return None
-
-
class XmlError(Exception):
pass
@@ -772,80 +335,6 @@ def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
-# To check if marker file exists
-def check_marker_exists(markerid, marker_dir):
- """
- Check the existence of a marker file.
- Presence of marker file determines whether a certain code path is to be
- executed. It is needed for partial guest customization in VMware.
- @param markerid: is an unique string representing a particular product
- marker.
- @param: marker_dir: The directory in which markers exist.
- """
- if not markerid:
- return False
- markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
- if os.path.exists(markerfile):
- return True
- return False
-
-
-# Create a marker file
-def setup_marker_files(markerid, marker_dir):
- """
- Create a new marker file.
- Marker files are unique to a full customization workflow in VMware
- environment.
- @param markerid: is an unique string representing a particular product
- marker.
- @param: marker_dir: The directory in which markers exist.
-
- """
- LOG.debug("Handle marker creation")
- markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
- for fname in os.listdir(marker_dir):
- if fname.startswith(".markerfile"):
- util.del_file(os.path.join(marker_dir, fname))
- open(markerfile, "w").close()
-
-
-def _raise_error_status(prefix, error, event, config_file, conf):
- """
- Raise error and send customization status to the underlying VMware
- Virtualization Platform. Also, cleanup the imc directory.
- """
- LOG.debug("%s: %s", prefix, error)
- set_customization_status(GuestCustStateEnum.GUESTCUST_STATE_RUNNING, event)
- set_gc_status(conf, prefix)
- util.del_dir(os.path.dirname(config_file))
- raise error
-
-
-def load_cloudinit_data(md_path, ud_path):
- """
- Load the cloud-init meta data, user data, cfg and network from the
- given files
-
- @return: 4-tuple of configuration
- metadata, userdata, cfg={}, network
-
- @raises: FileNotFoundError if md_path or ud_path are absent
- """
- LOG.debug("load meta data from: %s: user data from: %s", md_path, ud_path)
- md = {}
- ud = None
- network = None
-
- md = safeload_yaml_or_dict(util.load_file(md_path))
-
- if "network" in md:
- network = md["network"]
-
- if ud_path:
- ud = util.load_file(ud_path).replace("\r", "")
- return md, ud, {}, network
-
-
def safeload_yaml_or_dict(data):
"""
The meta data could be JSON or YAML. Since YAML is a strict superset of
@@ -857,47 +346,4 @@ def safeload_yaml_or_dict(data):
return safeyaml.load(data)
-def collect_imc_file_paths(cust_conf):
- """
- collect all the other imc files.
-
- metadata is preferred to nics.txt configuration data.
-
- If metadata file exists because it is specified in customization
- configuration, then metadata is required and userdata is optional.
-
- @return a 3-tuple containing desired configuration file paths if present
- Expected returns:
- 1. user provided metadata and userdata (md_path, ud_path, None)
- 2. user provided metadata (md_path, None, None)
- 3. user-provided network config (None, None, nics_path)
- 4. No config found (None, None, None)
- """
- md_path = None
- ud_path = None
- nics_path = None
- md_file = cust_conf.meta_data_name
- if md_file:
- md_path = os.path.join(VMWARE_IMC_DIR, md_file)
- if not os.path.exists(md_path):
- raise FileNotFoundError(
- "meta data file is not found: %s" % md_path
- )
-
- ud_file = cust_conf.user_data_name
- if ud_file:
- ud_path = os.path.join(VMWARE_IMC_DIR, ud_file)
- if not os.path.exists(ud_path):
- raise FileNotFoundError(
- "user data file is not found: %s" % ud_path
- )
- else:
- nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt")
- if not os.path.exists(nics_path):
- LOG.debug("%s does not exist.", nics_path)
- nics_path = None
-
- return md_path, ud_path, nics_path
-
-
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 915ed0c0..86ed3dd5 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -18,7 +18,7 @@ from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
# Various defaults/constants...
-DEF_MD_URL = "http://169.254.169.254"
+DEF_MD_URLS = ["http://[fe80::a9fe:a9fe]", "http://169.254.169.254"]
DEFAULT_IID = "iid-dsopenstack"
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
@@ -73,8 +73,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
return mstr
- def wait_for_metadata_service(self):
- urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
+ def wait_for_metadata_service(self, max_wait=None, timeout=None):
+ urls = self.ds_cfg.get("metadata_urls", DEF_MD_URLS)
filtered = [x for x in urls if util.is_resolvable_url(x)]
if set(filtered) != set(urls):
LOG.debug(
@@ -85,21 +85,29 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
urls = filtered
else:
LOG.warning("Empty metadata url list! using default list")
- urls = [DEF_MD_URL]
+ urls = DEF_MD_URLS
md_urls = []
url2base = {}
for url in urls:
+ # Wait for a specific openstack metadata url
md_url = url_helper.combine_url(url, "openstack")
md_urls.append(md_url)
url2base[md_url] = url
url_params = self.get_url_params()
+ if max_wait is None:
+ max_wait = url_params.max_wait_seconds
+
+ if timeout is None:
+ timeout = url_params.timeout_seconds
+
start_time = time.time()
avail_url, _response = url_helper.wait_for_url(
urls=md_urls,
- max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds,
+ max_wait=max_wait,
+ timeout=timeout,
+ connect_synchronously=False,
)
if avail_url:
LOG.debug("Using metadata source: '%s'", url2base[avail_url])
@@ -150,8 +158,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
format is invalid or disabled.
"""
oracle_considered = "Oracle" in self.sys_cfg.get("datasource_list")
- if not detect_openstack(accept_oracle=not oracle_considered):
- return False
if self.perform_dhcp_setup: # Setup networking in init-local stage.
try:
@@ -159,6 +165,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
self.fallback_interface,
tmp_dir=self.distro.get_tmp_exec_path(),
):
+ if not self.detect_openstack(
+ accept_oracle=not oracle_considered
+ ):
+ LOG.debug(
+ "OpenStack datasource not running"
+ " on OpenStack (dhcp)"
+ )
+ return False
+
results = util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
@@ -168,6 +183,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, str(e))
return False
else:
+ if not self.detect_openstack(accept_oracle=not oracle_considered):
+ LOG.debug(
+ "OpenStack datasource not running"
+ " on OpenStack (non-dhcp)"
+ )
+ return False
+
try:
results = self._crawl_metadata()
except sources.InvalidMetaDataException as e:
@@ -246,6 +268,30 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
raise sources.InvalidMetaDataException(msg) from e
return result
+ def detect_openstack(self, accept_oracle=False):
+ """Return True when a potential OpenStack platform is detected."""
+ if not util.is_x86():
+ # Non-Intel cpus don't properly report dmi product names
+ return True
+
+ product_name = dmi.read_dmi_data("system-product-name")
+ if product_name in VALID_DMI_PRODUCT_NAMES:
+ return True
+ elif dmi.read_dmi_data("chassis-asset-tag") in VALID_DMI_ASSET_TAGS:
+ return True
+ elif accept_oracle and oracle._is_platform_viable():
+ return True
+ elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA:
+ return True
+        # On bare metal hardware, the product name is not set the way it
+        # is in a virtual OpenStack VM, so if the system is not virtual,
+        # check whether the OpenStack metadata service can be reached.
+ elif not self.distro.is_virtual and self.wait_for_metadata_service(
+ max_wait=15, timeout=5
+ ):
+ return True
+ return False
+
class DataSourceOpenStackLocal(DataSourceOpenStack):
"""Run in init-local using a dhcp discovery prior to metadata crawl.
@@ -266,22 +312,6 @@ def read_metadata_service(base_url, ssl_details=None, timeout=5, retries=5):
return reader.read_v2()
-def detect_openstack(accept_oracle=False):
- """Return True when a potential OpenStack platform is detected."""
- if not util.is_x86():
- return True # Non-Intel cpus don't properly report dmi product names
- product_name = dmi.read_dmi_data("system-product-name")
- if product_name in VALID_DMI_PRODUCT_NAMES:
- return True
- elif dmi.read_dmi_data("chassis-asset-tag") in VALID_DMI_ASSET_TAGS:
- return True
- elif accept_oracle and oracle._is_platform_viable():
- return True
- elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA:
- return True
- return False
-
-
# Used to match classes to dependencies
datasources = [
(DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)),
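
DEF_MD_URLS now carries both the IPv6 link-local and the IPv4 metadata addresses, and wait_for_url() is asked to try them concurrently (connect_synchronously=False). A rough standalone equivalent of "first URL to answer wins", using plain `requests` and a thread pool rather than cloud-init's url helper; it is a sketch of the idea, not the helper's implementation:

    import concurrent.futures
    import requests

    CANDIDATE_URLS = [
        "http://[fe80::a9fe:a9fe]/openstack",
        "http://169.254.169.254/openstack",
    ]

    def first_reachable(urls, timeout=5):
        # Probe all candidates in parallel and return the first one that
        # answers with a non-error status, or None if none do.
        def probe(url):
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()
            return url

        with concurrent.futures.ThreadPoolExecutor(len(urls)) as pool:
            futures = {pool.submit(probe, url): url for url in urls}
            for future in concurrent.futures.as_completed(futures):
                if not future.exception():
                    return future.result()
        return None
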
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
index 308e02e8..07a80222 100644
--- a/cloudinit/sources/DataSourceVMware.py
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -1,9 +1,10 @@
# Cloud-Init DataSource for VMware
#
-# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved.
+# Copyright (c) 2018-2022 VMware, Inc. All Rights Reserved.
#
# Authors: Anish Swaminathan <anishs@vmware.com>
# Andrew Kutz <akutz@vmware.com>
+# Pengpeng Sun <pengpengs@vmware.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -14,6 +15,7 @@ multiple transports types, including:
* EnvVars
* GuestInfo
+ * IMC (Guest Customization)
Netifaces (https://github.com/al45tair/netifaces)
@@ -74,6 +76,7 @@ import netifaces
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net, sources, util
+from cloudinit.sources.helpers.vmware.imc import guestcust_util
from cloudinit.subp import ProcessExecutionError, subp, which
PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
@@ -81,8 +84,10 @@ PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
LOG = logging.getLogger(__name__)
NOVAL = "No value found"
+# Data transport names
DATA_ACCESS_METHOD_ENVVAR = "envvar"
DATA_ACCESS_METHOD_GUESTINFO = "guestinfo"
+DATA_ACCESS_METHOD_IMC = "imc"
VMWARE_RPCTOOL = which("vmware-rpctool")
REDACT = "redact"
@@ -116,14 +121,22 @@ class DataSourceVMware(sources.DataSource):
Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2
For example, CentOS 7's official cloud-init package is version
- 0.7.9 and does not support Network Config Version 2. However,
- this datasource still supports supplying Network Config Version 2
- data as long as the Linux distro's cloud-init package is new
- enough to parse the data.
+ 0.7.9 and does not support Network Config Version 2.
- The metadata key "network.encoding" may be used to indicate the
- format of the metadata key "network". Valid encodings are base64
- and gzip+base64.
+ imc transport:
+        Either Network Config Version 1 or Network Config Version 2 is
+        supported, depending on the customization type.
+        For LinuxPrep customization, Network Config Version 1 data is
+        parsed from the customization specification.
+        For CloudinitPrep customization, Network Config Version 2 data
+        is parsed from the customization specification.
+
+    envvar and guestinfo transports:
+ Network Config Version 2 data is supported as long as the Linux
+ distro's cloud-init package is new enough to parse the data.
+ The metadata key "network.encoding" may be used to indicate the
+ format of the metadata key "network". Valid encodings are base64
+ and gzip+base64.
"""
dsname = "VMware"
@@ -131,9 +144,27 @@ class DataSourceVMware(sources.DataSource):
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+ self.cfg = {}
self.data_access_method = None
self.vmware_rpctool = VMWARE_RPCTOOL
+        # A list of all possible data transports; each tuple represents
+        # one data transport type. This datasource tries to get data from
+        # each transport in the order the tuples appear in this list.
+        # A tuple has 3 elements:
+        # 1. The transport name
+        # 2. The function to call to get data for the transport
+        # 3. A boolean telling whether the transport requires the VMware
+        #    platform
+ self.possible_data_access_method_list = [
+ (DATA_ACCESS_METHOD_ENVVAR, self.get_envvar_data_fn, False),
+ (DATA_ACCESS_METHOD_GUESTINFO, self.get_guestinfo_data_fn, True),
+ (DATA_ACCESS_METHOD_IMC, self.get_imc_data_fn, True),
+ ]
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s]" % (root, self.data_access_method)
+
def _get_data(self):
"""
_get_data loads the metadata, userdata, and vendordata from one of
@@ -141,6 +172,7 @@ class DataSourceVMware(sources.DataSource):
* envvars
* guestinfo
+ * imc
Please note when updating this function with support for new data
transports, the order should match the order in the dscheck_VMware
@@ -152,35 +184,18 @@ class DataSourceVMware(sources.DataSource):
# access method.
md, ud, vd = None, None, None
- # First check to see if there is data via env vars.
- if os.environ.get(VMX_GUESTINFO, ""):
- md = guestinfo_envvar("metadata")
- ud = guestinfo_envvar("userdata")
- vd = guestinfo_envvar("vendordata")
-
+ # Crawl data from all possible data transports
+ for (
+ data_access_method,
+ get_data_fn,
+ require_vmware_platform,
+ ) in self.possible_data_access_method_list:
+ if require_vmware_platform and not is_vmware_platform():
+ continue
+ (md, ud, vd) = get_data_fn()
if md or ud or vd:
- self.data_access_method = DATA_ACCESS_METHOD_ENVVAR
-
- # At this point, all additional data transports are valid only on
- # a VMware platform.
- if not self.data_access_method:
- system_type = dmi.read_dmi_data("system-product-name")
- if system_type is None:
- LOG.debug("No system-product-name found")
- return False
- if "vmware" not in system_type.lower():
- LOG.debug("Not a VMware platform")
- return False
-
- # If no data was detected, check the guestinfo transport next.
- if not self.data_access_method:
- if self.vmware_rpctool:
- md = guestinfo("metadata", self.vmware_rpctool)
- ud = guestinfo("userdata", self.vmware_rpctool)
- vd = guestinfo("vendordata", self.vmware_rpctool)
-
- if md or ud or vd:
- self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO
+ self.data_access_method = data_access_method
+ break
if not self.data_access_method:
LOG.error("failed to find a valid data access method")
@@ -241,6 +256,8 @@ class DataSourceVMware(sources.DataSource):
get_key_name_fn = get_guestinfo_envvar_key_name
elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
get_key_name_fn = get_guestinfo_key_name
+ elif self.data_access_method == DATA_ACCESS_METHOD_IMC:
+ get_key_name_fn = get_imc_key_name
else:
return sources.METADATA_UNKNOWN
@@ -249,6 +266,12 @@ class DataSourceVMware(sources.DataSource):
get_key_name_fn("metadata"),
)
+    # The datasource's config_obj is a cloud-config formatted object
+    # that came to it from ways other than cloud-config, because
+    # cloud-config content would be handled elsewhere
+ def get_config_obj(self):
+ return self.cfg
+
@property
def network_config(self):
if "network" in self.metadata:
@@ -292,6 +315,98 @@ class DataSourceVMware(sources.DataSource):
if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool)
+ def get_envvar_data_fn(self):
+ """
+ check to see if there is data via env vars
+ """
+ md, ud, vd = None, None, None
+ if os.environ.get(VMX_GUESTINFO, ""):
+ md = guestinfo_envvar("metadata")
+ ud = guestinfo_envvar("userdata")
+ vd = guestinfo_envvar("vendordata")
+
+ return (md, ud, vd)
+
+ def get_guestinfo_data_fn(self):
+ """
+ check to see if there is data via the guestinfo transport
+ """
+ md, ud, vd = None, None, None
+ if self.vmware_rpctool:
+ md = guestinfo("metadata", self.vmware_rpctool)
+ ud = guestinfo("userdata", self.vmware_rpctool)
+ vd = guestinfo("vendordata", self.vmware_rpctool)
+
+ return (md, ud, vd)
+
+ def get_imc_data_fn(self):
+ """
+ check to see if there is data via vmware guest customization
+ """
+ md, ud, vd = None, None, None
+
+ # Check if vmware guest customization is enabled.
+ allow_vmware_cust = guestcust_util.is_vmware_cust_enabled(self.sys_cfg)
+ allow_raw_data_cust = guestcust_util.is_raw_data_cust_enabled(
+ self.ds_cfg
+ )
+ if not allow_vmware_cust and not allow_raw_data_cust:
+ LOG.debug("Customization for VMware platform is disabled")
+ return (md, ud, vd)
+
+ # Check if "VMware Tools" plugin is available.
+ if not guestcust_util.is_cust_plugin_available():
+ return (md, ud, vd)
+
+ # Wait for vmware guest customization configuration file.
+ cust_cfg_file = guestcust_util.get_cust_cfg_file(self.ds_cfg)
+ if cust_cfg_file is None:
+ return (md, ud, vd)
+
+ # Check what type of guest customization is this.
+ cust_cfg_dir = os.path.dirname(cust_cfg_file)
+ cust_cfg = guestcust_util.parse_cust_cfg(cust_cfg_file)
+ (
+ is_vmware_cust_cfg,
+ is_raw_data_cust_cfg,
+ ) = guestcust_util.get_cust_cfg_type(cust_cfg)
+
+ # Get data only if guest customization type and flag matches.
+ if is_vmware_cust_cfg and allow_vmware_cust:
+ LOG.debug("Getting data via VMware customization configuration")
+ (md, ud, vd, self.cfg) = guestcust_util.get_data_from_imc_cust_cfg(
+ self.paths.cloud_dir,
+ self.paths.get_cpath("scripts"),
+ cust_cfg,
+ cust_cfg_dir,
+ self.distro,
+ )
+ elif is_raw_data_cust_cfg and allow_raw_data_cust:
+ LOG.debug(
+ "Getting data via VMware raw cloudinit data "
+ "customization configuration"
+ )
+ (md, ud, vd) = guestcust_util.get_data_from_imc_raw_data_cust_cfg(
+ cust_cfg
+ )
+ else:
+ LOG.debug("No allowed customization configuration data found")
+
+ # Clean customization configuration file and directory
+ util.del_dir(cust_cfg_dir)
+ return (md, ud, vd)
+
+
+def is_vmware_platform():
+ system_type = dmi.read_dmi_data("system-product-name")
+ if system_type is None:
+ LOG.debug("No system-product-name found")
+ return False
+ elif "vmware" not in system_type.lower():
+ LOG.debug("Not a VMware platform")
+ return False
+ return True
+
def decode(key, enc_type, data):
"""
@@ -367,6 +482,10 @@ def handle_returned_guestinfo_val(key, val):
return None
+def get_imc_key_name(key):
+ return "vmware-tools"
+
+
def get_guestinfo_key_name(key):
return "guestinfo." + key
@@ -512,6 +631,9 @@ def load_json_or_yaml(data):
"""
if not data:
return {}
+    # If data is already a dictionary, return it directly.
+ if isinstance(data, dict):
+ return data
try:
return util.load_json(data)
except (json.JSONDecodeError, TypeError):
@@ -523,6 +645,8 @@ def process_metadata(data):
process_metadata processes metadata and loads the optional network
configuration.
"""
+ if not data:
+ return {}
network = None
if "network" in data:
network = data["network"]
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index 93b04829..9d7c84fb 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -48,12 +48,6 @@ class DataSourceVultr(sources.DataSource):
# Fetch metadata
self.metadata = self.get_metadata()
- self.metadata["instance-id"] = self.metadata["instance-v2-id"]
- self.metadata["local-hostname"] = self.metadata["hostname"]
- region = self.metadata["region"]["regioncode"]
- if "countrycode" in self.metadata["region"]:
- region = self.metadata["region"]["countrycode"]
- self.metadata["region"] = region.lower()
self.userdata_raw = self.metadata["user-data"]
# Generate config and process data
@@ -76,10 +70,10 @@ class DataSourceVultr(sources.DataSource):
if "cloud_interfaces" in md:
# In the future we will just drop pre-configured
# network configs into the array. They need names though.
- self.netcfg = vultr.add_interface_names(md["cloud_interfaces"])
+ vultr.add_interface_names(md["cloud_interfaces"])
+ self.netcfg = md["cloud_interfaces"]
else:
self.netcfg = vultr.generate_network_config(md["interfaces"])
-
# Grab vendordata
self.vendordata_raw = md["vendor-data"]
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 85e094ac..12430401 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -435,12 +435,15 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
cloud_id = instance_data["v1"].get("cloud_id", "none")
cloud_id_file = os.path.join(self.paths.run_dir, "cloud-id")
util.write_file(f"{cloud_id_file}-{cloud_id}", f"{cloud_id}\n")
+        # cloud-id file not found, so there is no previous cloud-id file
+ prev_cloud_id_file = None
+ new_cloud_id_file = f"{cloud_id_file}-{cloud_id}"
+        # cloud-id file found: the previous cloud-id file is whatever the symlink points to
if os.path.exists(cloud_id_file):
prev_cloud_id_file = os.path.realpath(cloud_id_file)
- else:
- prev_cloud_id_file = cloud_id_file
- util.sym_link(f"{cloud_id_file}-{cloud_id}", cloud_id_file, force=True)
- if prev_cloud_id_file != cloud_id_file:
+
+ util.sym_link(new_cloud_id_file, cloud_id_file, force=True)
+ if prev_cloud_id_file and prev_cloud_id_file != new_cloud_id_file:
util.del_file(prev_cloud_id_file)
write_json(json_sensitive_file, processed_data, mode=0o600)
json_file = self.paths.get_runpath("instance_data")
diff --git a/cloudinit/sources/azure/__init__.py b/cloudinit/sources/azure/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/sources/azure/__init__.py
diff --git a/cloudinit/sources/azure/imds.py b/cloudinit/sources/azure/imds.py
new file mode 100644
index 00000000..54fc9a05
--- /dev/null
+++ b/cloudinit/sources/azure/imds.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2022 Microsoft Corporation.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import functools
+from typing import Dict
+
+import requests
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.sources.helpers.azure import report_diagnostic_event
+from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
+
+LOG = logging.getLogger(__name__)
+
+IMDS_URL = "http://169.254.169.254/metadata"
+
+_readurl_exception_callback = functools.partial(
+ retry_on_url_exc,
+ retry_codes=(
+ 404, # not found (yet)
+ 410, # gone / unavailable (yet)
+ 429, # rate-limited/throttled
+ 500, # server error
+ ),
+ retry_instances=(
+ requests.ConnectionError,
+ requests.Timeout,
+ ),
+)
+
+
+def _fetch_url(
+ url: str, *, log_response: bool = True, retries: int = 10, timeout: int = 2
+) -> bytes:
+ """Fetch URL from IMDS.
+
+ :raises UrlError: on error fetching metadata.
+ """
+
+ try:
+ response = readurl(
+ url,
+ exception_cb=_readurl_exception_callback,
+ headers={"Metadata": "true"},
+ infinite=False,
+ log_req_resp=log_response,
+ retries=retries,
+ timeout=timeout,
+ )
+ except UrlError as error:
+ report_diagnostic_event(
+ "Failed to fetch metadata from IMDS: %s" % error,
+ logger_func=LOG.warning,
+ )
+ raise
+
+ return response.contents
+
+
+def _fetch_metadata(
+ url: str,
+) -> Dict:
+ """Fetch IMDS metadata.
+
+ :raises UrlError: on error fetching metadata.
+ :raises ValueError: on error parsing metadata.
+ """
+ metadata = _fetch_url(url)
+
+ try:
+ return util.load_json(metadata)
+ except ValueError as error:
+ report_diagnostic_event(
+ "Failed to parse metadata from IMDS: %s" % error,
+ logger_func=LOG.warning,
+ )
+ raise
+
+
+def fetch_metadata_with_api_fallback() -> Dict:
+ """Fetch extended metadata, falling back to non-extended as required.
+
+ :raises UrlError: on error fetching metadata.
+ :raises ValueError: on error parsing metadata.
+ """
+ try:
+ url = IMDS_URL + "/instance?api-version=2021-08-01&extended=true"
+ return _fetch_metadata(url)
+ except UrlError as error:
+ if error.code == 400:
+ report_diagnostic_event(
+ "Falling back to IMDS api-version: 2019-06-01",
+ logger_func=LOG.warning,
+ )
+ url = IMDS_URL + "/instance?api-version=2019-06-01"
+ return _fetch_metadata(url)
+ raise
+
+
+def fetch_reprovision_data() -> bytes:
+ """Fetch extended metadata, falling back to non-extended as required.
+
+ :raises UrlError: on error.
+ """
+ url = IMDS_URL + "/reprovisiondata?api-version=2019-06-01"
+
+ logging_threshold = 1
+ poll_counter = 0
+
+ def exception_callback(msg, exception):
+ nonlocal logging_threshold
+ nonlocal poll_counter
+
+ poll_counter += 1
+ if not isinstance(exception, UrlError):
+ report_diagnostic_event(
+ "Polling IMDS failed with unexpected exception: %r"
+ % (exception),
+ logger_func=LOG.warning,
+ )
+ return False
+
+ log = True
+ retry = False
+ if exception.code in (404, 410):
+ retry = True
+ if poll_counter >= logging_threshold:
+ # Exponential back-off on logging.
+ logging_threshold *= 2
+ else:
+ log = False
+
+ if log:
+ report_diagnostic_event(
+ "Polling IMDS failed with exception: %r count: %d"
+ % (exception, poll_counter),
+ logger_func=LOG.info,
+ )
+ return retry
+
+ response = readurl(
+ url,
+ exception_cb=exception_callback,
+ headers={"Metadata": "true"},
+ infinite=True,
+ log_req_resp=False,
+ timeout=2,
+ )
+
+ report_diagnostic_event(
+ f"Polled IMDS {poll_counter+1} time(s)",
+ logger_func=LOG.debug,
+ )
+ return response.contents
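
A minimal sketch of a call site for the new module, grounded only in the signatures above (the wrapper name is illustrative): transient errors are retried inside the helpers, an HTTP 400 triggers the api-version fallback, and anything else surfaces as UrlError or ValueError.

    from cloudinit.sources.azure import imds
    from cloudinit.url_helper import UrlError

    def crawl_imds_or_empty() -> dict:
        # Returns {} when IMDS metadata cannot be fetched or parsed.
        try:
            return imds.fetch_metadata_with_api_fallback()
        except (UrlError, ValueError):
            return {}
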
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 56f44339..d4fc04e2 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -13,7 +13,7 @@ from contextlib import contextmanager
from datetime import datetime
from errno import ENOENT
from time import sleep, time
-from typing import List, Optional, Union
+from typing import Callable, List, Optional, TypeVar, Union
from xml.etree import ElementTree
from xml.sax.saxutils import escape
@@ -50,8 +50,10 @@ DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE = (
"for more information on remediation."
)
+T = TypeVar("T")
-def azure_ds_telemetry_reporter(func):
+
+def azure_ds_telemetry_reporter(func: Callable[..., T]) -> Callable[..., T]:
def impl(*args, **kwargs):
with events.ReportEventStack(
name=func.__name__,
@@ -335,7 +337,7 @@ def http_with_retries(
url: str,
*,
headers: dict,
- data: Optional[str] = None,
+ data: Optional[bytes] = None,
retry_sleep: int = 5,
timeout_minutes: int = 20,
) -> url_helper.UrlResponse:
@@ -440,7 +442,7 @@ class AzureEndpointHttpClient:
return http_with_retries(url, headers=headers)
def post(
- self, url, data=None, extra_headers=None
+ self, url, data: Optional[bytes] = None, extra_headers=None
) -> url_helper.UrlResponse:
headers = self.headers
if extra_headers is not None:
@@ -752,7 +754,7 @@ class GoalStateHealthReporter:
status: str,
substatus=None,
description=None,
- ) -> str:
+ ) -> bytes:
health_detail = ""
if substatus is not None:
health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
@@ -770,10 +772,10 @@ class GoalStateHealthReporter:
health_detail_subsection=health_detail,
)
- return health_report
+ return health_report.encode("utf-8")
@azure_ds_telemetry_reporter
- def _post_health_report(self, document: str) -> None:
+ def _post_health_report(self, document: bytes) -> None:
push_log_to_kvp()
# Whenever report_diagnostic_event(diagnostic_msg) is invoked in code,
@@ -1051,10 +1053,9 @@ def get_metadata_from_fabric(
@azure_ds_telemetry_reporter
-def report_failure_to_fabric(endpoint: str, description: Optional[str] = None):
+def report_failure_to_fabric(endpoint: str):
shim = WALinuxAgentShim(endpoint=endpoint)
- if not description:
- description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
try:
shim.register_with_azure_and_report_failure(description=description)
finally:
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 8b2deb65..df9e5c4b 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -29,6 +29,7 @@ class Config:
DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT"
CLOUDINIT_META_DATA = "CLOUDINIT|METADATA"
CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA"
+ CLOUDINIT_INSTANCE_ID = "CLOUDINIT|INSTANCE-ID"
def __init__(self, configFile):
self._configFile = configFile
@@ -142,5 +143,10 @@ class Config:
"""Return the name of cloud-init user data."""
return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)
+ @property
+ def instance_id(self):
+ """Return instance id"""
+ return self._configFile.get(Config.CLOUDINIT_INSTANCE_ID, None)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 5b5f02ca..6ffbae40 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -1,7 +1,8 @@
# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
+# Copyright (C) 2016-2022 VMware Inc.
#
# Author: Sankar Tanguturi <stanguturi@vmware.com>
+# Pengpeng Sun <pengpengs@vmware.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -10,13 +11,20 @@ import os
import re
import time
-from cloudinit import subp
-from cloudinit.sources.helpers.vmware.imc.guestcust_event import (
- GuestCustEventEnum,
-)
-from cloudinit.sources.helpers.vmware.imc.guestcust_state import (
- GuestCustStateEnum,
+from cloudinit import subp, util
+
+from .config import Config
+from .config_custom_script import (
+ CustomScriptNotFound,
+ PostCustomScript,
+ PreCustomScript,
)
+from .config_file import ConfigFile
+from .config_nic import NicConfigurator
+from .config_passwd import PasswordConfigurator
+from .guestcust_error import GuestCustErrorEnum
+from .guestcust_event import GuestCustEventEnum
+from .guestcust_state import GuestCustStateEnum
logger = logging.getLogger(__name__)
@@ -24,6 +32,11 @@ logger = logging.getLogger(__name__)
CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log"
QUERY_NICS_SUPPORTED = "queryNicsSupported"
NICS_STATUS_CONNECTED = "connected"
+# Path to the VMware IMC directory
+IMC_DIR_PATH = "/var/run/vmware-imc"
+# Customization script configuration in tools conf
+IMC_TOOLS_CONF_GROUPNAME = "deployPkg"
+IMC_TOOLS_CONF_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
# This will send a RPC command to the underlying
@@ -183,4 +196,447 @@ def set_gc_status(config, gcMsg):
return None
+def get_imc_dir_path():
+ return IMC_DIR_PATH
+
+
+def get_data_from_imc_cust_cfg(
+ cloud_dir,
+ scripts_cpath,
+ cust_cfg,
+ cust_cfg_dir,
+ distro,
+):
+ md, ud, vd, cfg = {}, None, None, {}
+ set_gc_status(cust_cfg, "Started")
+ (md, cfg) = get_non_network_data_from_vmware_cust_cfg(cust_cfg)
+ is_special_customization = check_markers(cloud_dir, cust_cfg)
+ if is_special_customization:
+ if not do_special_customization(
+ scripts_cpath, cust_cfg, cust_cfg_dir, distro
+ ):
+ return (None, None, None, None)
+ if not recheck_markers(cloud_dir, cust_cfg):
+ return (None, None, None, None)
+ try:
+ logger.debug("Preparing the Network configuration")
+ md["network"] = get_network_data_from_vmware_cust_cfg(
+ cust_cfg, True, True, distro.osfamily
+ )
+ except Exception as e:
+ set_cust_error_status(
+ "Error preparing Network Configuration",
+ str(e),
+ GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
+ cust_cfg,
+ )
+ return (None, None, None, None)
+ connect_nics(cust_cfg_dir)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_DONE,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
+ set_gc_status(cust_cfg, "Successful")
+ return (md, ud, vd, cfg)
+
+
+def get_data_from_imc_raw_data_cust_cfg(cust_cfg):
+ set_gc_status(cust_cfg, "Started")
+ md, ud, vd = None, None, None
+ md_file = cust_cfg.meta_data_name
+ if md_file:
+ md_path = os.path.join(get_imc_dir_path(), md_file)
+ if not os.path.exists(md_path):
+ set_cust_error_status(
+ "Error locating the cloud-init meta data file",
+ "Meta data file is not found: %s" % md_path,
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ cust_cfg,
+ )
+ return (None, None, None)
+ try:
+ md = util.load_file(md_path)
+ except Exception as e:
+ set_cust_error_status(
+ "Error loading cloud-init meta data file",
+ str(e),
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ cust_cfg,
+ )
+ return (None, None, None)
+
+ ud_file = cust_cfg.user_data_name
+ if ud_file:
+ ud_path = os.path.join(get_imc_dir_path(), ud_file)
+ if not os.path.exists(ud_path):
+ set_cust_error_status(
+ "Error locating the cloud-init userdata file",
+ "Userdata file is not found: %s" % ud_path,
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ cust_cfg,
+ )
+ return (None, None, None)
+ try:
+ ud = util.load_file(ud_path).replace("\r", "")
+ except Exception as e:
+ set_cust_error_status(
+ "Error loading cloud-init userdata file",
+ str(e),
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ cust_cfg,
+ )
+ return (None, None, None)
+
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_DONE,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
+ set_gc_status(cust_cfg, "Successful")
+ return (md, ud, vd)
+
+
+def get_non_network_data_from_vmware_cust_cfg(cust_cfg):
+ md, cfg = {}, {}
+ if cust_cfg.host_name:
+ if cust_cfg.domain_name:
+ md["local-hostname"] = (
+ cust_cfg.host_name + "." + cust_cfg.domain_name
+ )
+ else:
+ md["local-hostname"] = cust_cfg.host_name
+ if cust_cfg.timezone:
+ cfg["timezone"] = cust_cfg.timezone
+ if cust_cfg.instance_id:
+ md["instance-id"] = cust_cfg.instance_id
+ return (md, cfg)
+
+
+def get_network_data_from_vmware_cust_cfg(
+ cust_cfg, use_system_devices=True, configure=False, osfamily=None
+):
+ nicConfigurator = NicConfigurator(cust_cfg.nics, use_system_devices)
+ nics_cfg_list = nicConfigurator.generate(configure, osfamily)
+
+ return get_v1_network_config(
+ nics_cfg_list, cust_cfg.name_servers, cust_cfg.dns_suffixes
+ )
+
+
+def get_v1_network_config(nics_cfg_list=None, nameservers=None, search=None):
+ config_list = nics_cfg_list
+
+ if nameservers or search:
+ config_list.append(
+ {"type": "nameserver", "address": nameservers, "search": search}
+ )
+
+ return {"version": 1, "config": config_list}
+
+
+def connect_nics(cust_cfg_dir):
+ nics_file = os.path.join(cust_cfg_dir, "nics.txt")
+ if os.path.exists(nics_file):
+ logger.debug("%s file found, to connect nics", nics_file)
+ enable_nics(get_nics_to_enable(nics_file))
+
+
+def is_vmware_cust_enabled(sys_cfg):
+ return not util.get_cfg_option_bool(
+ sys_cfg, "disable_vmware_customization", True
+ )
+
+
+def is_raw_data_cust_enabled(ds_cfg):
+ return util.get_cfg_option_bool(ds_cfg, "allow_raw_data", True)
+
+
+def get_cust_cfg_file(ds_cfg):
+    # When the VM is powered on, the "VMware Tools" daemon
+    # copies the customization specification file to the
+    # /var/run/vmware-imc directory. cloud-init needs to search
+    # for the file in that directory; its presence indicates that
+    # the required metadata and userdata files are now available.
+ max_wait = get_max_wait_from_cfg(ds_cfg)
+ cust_cfg_file_path = util.log_time(
+ logfunc=logger.debug,
+ msg="Waiting for VMware customization configuration file",
+ func=wait_for_cust_cfg_file,
+ args=("cust.cfg", max_wait),
+ )
+ if cust_cfg_file_path:
+ logger.debug(
+ "Found VMware customization configuration file at %s",
+ cust_cfg_file_path,
+ )
+ return cust_cfg_file_path
+ else:
+ logger.debug("No VMware customization configuration file found")
+ return None
+
+
+def wait_for_cust_cfg_file(
+ filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc"
+):
+ waited = 0
+ if maxwait <= naplen:
+ naplen = 1
+
+ while waited < maxwait:
+ fileFullPath = os.path.join(dirpath, filename)
+ if os.path.isfile(fileFullPath):
+ return fileFullPath
+ logger.debug("Waiting for VMware customization configuration file")
+ time.sleep(naplen)
+ waited += naplen
+ return None
+
+
+def get_max_wait_from_cfg(ds_cfg):
+ default_max_wait = 15
+ max_wait_cfg_option = "vmware_cust_file_max_wait"
+ max_wait = default_max_wait
+ if not ds_cfg:
+ return max_wait
+ try:
+ max_wait = int(ds_cfg.get(max_wait_cfg_option, default_max_wait))
+ except ValueError:
+ logger.warning(
+ "Failed to get '%s', using %s",
+ max_wait_cfg_option,
+ default_max_wait,
+ )
+ if max_wait < 0:
+ logger.warning(
+ "Invalid value '%s' for '%s', using '%s' instead",
+ max_wait,
+ max_wait_cfg_option,
+ default_max_wait,
+ )
+ max_wait = default_max_wait
+ return max_wait
+
+
+def check_markers(cloud_dir, cust_cfg):
+ product_marker = cust_cfg.marker_id
+ has_marker_file = check_marker_exists(
+ product_marker, os.path.join(cloud_dir, "data")
+ )
+ return product_marker and not has_marker_file
+
+
+def check_marker_exists(markerid, marker_dir):
+ """
+ Check the existence of a marker file.
+    Presence of a marker file determines whether a certain code path is to be
+    executed. It is needed for partial guest customization in VMware.
+    @param markerid: a unique string representing a particular product
+    marker.
+    @param marker_dir: The directory in which markers exist.
+ """
+ if not markerid:
+ return False
+ markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
+ if os.path.exists(markerfile):
+ return True
+ return False
+
+
+def recheck_markers(cloud_dir, cust_cfg):
+ product_marker = cust_cfg.marker_id
+ if product_marker:
+ if not create_marker_file(cloud_dir, cust_cfg):
+ return False
+ return True
+
+
+def create_marker_file(cloud_dir, cust_cfg):
+ try:
+ setup_marker_files(cust_cfg.marker_id, os.path.join(cloud_dir, "data"))
+ except Exception as e:
+ set_cust_error_status(
+ "Error creating marker files",
+ str(e),
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ cust_cfg,
+ )
+ return False
+ return True
+
+
+def setup_marker_files(marker_id, marker_dir):
+ """
+ Create a new marker file.
+    Marker files are unique to a full customization workflow in the VMware
+    environment.
+    @param marker_id: a unique string representing a particular product
+    marker.
+    @param marker_dir: The directory in which markers exist.
+ """
+ logger.debug("Handle marker creation")
+ marker_file = os.path.join(marker_dir, ".markerfile-" + marker_id + ".txt")
+ for fname in os.listdir(marker_dir):
+ if fname.startswith(".markerfile"):
+ util.del_file(os.path.join(marker_dir, fname))
+ open(marker_file, "w").close()
+
+
+def do_special_customization(scripts_cpath, cust_cfg, cust_cfg_dir, distro):
+ is_pre_custom_successful = False
+ is_password_custom_successful = False
+ is_post_custom_successful = False
+ is_custom_script_enabled = False
+ custom_script = cust_cfg.custom_script_name
+ if custom_script:
+ is_custom_script_enabled = check_custom_script_enablement(cust_cfg)
+ if is_custom_script_enabled:
+ is_pre_custom_successful = do_pre_custom_script(
+ cust_cfg, custom_script, cust_cfg_dir
+ )
+ is_password_custom_successful = do_password_customization(cust_cfg, distro)
+ if custom_script and is_custom_script_enabled:
+ ccScriptsDir = os.path.join(scripts_cpath, "per-instance")
+ is_post_custom_successful = do_post_custom_script(
+ cust_cfg, custom_script, cust_cfg_dir, ccScriptsDir
+ )
+ if custom_script:
+ return (
+ is_pre_custom_successful
+ and is_password_custom_successful
+ and is_post_custom_successful
+ )
+ return is_password_custom_successful
+
+
+def do_pre_custom_script(cust_cfg, custom_script, cust_cfg_dir):
+ try:
+ precust = PreCustomScript(custom_script, cust_cfg_dir)
+ precust.execute()
+ except CustomScriptNotFound as e:
+ set_cust_error_status(
+ "Error executing pre-customization script",
+ str(e),
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ cust_cfg,
+ )
+ return False
+ return True
+
+
+def do_post_custom_script(cust_cfg, custom_script, cust_cfg_dir, ccScriptsDir):
+ try:
+ postcust = PostCustomScript(custom_script, cust_cfg_dir, ccScriptsDir)
+ postcust.execute()
+ except CustomScriptNotFound as e:
+ set_cust_error_status(
+ "Error executing post-customization script",
+ str(e),
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ cust_cfg,
+ )
+ return False
+ return True
+
+
+def check_custom_script_enablement(cust_cfg):
+ is_custom_script_enabled = False
+ default_value = "false"
+ if cust_cfg.default_run_post_script:
+ logger.debug(
+ "Set default value to true due to customization configuration."
+ )
+ default_value = "true"
+ custom_script_enablement = get_tools_config(
+ IMC_TOOLS_CONF_GROUPNAME,
+ IMC_TOOLS_CONF_ENABLE_CUST_SCRIPTS,
+ default_value,
+ )
+ if custom_script_enablement.lower() != "true":
+ set_cust_error_status(
+ "Custom script is disabled by VM Administrator",
+ "Error checking custom script enablement",
+ GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
+ cust_cfg,
+ )
+ else:
+ is_custom_script_enabled = True
+ return is_custom_script_enabled
+
+
+def do_password_customization(cust_cfg, distro):
+ logger.debug("Applying password customization")
+ pwdConfigurator = PasswordConfigurator()
+ admin_pwd = cust_cfg.admin_password
+ try:
+ reset_pwd = cust_cfg.reset_password
+ if admin_pwd or reset_pwd:
+ pwdConfigurator.configure(admin_pwd, reset_pwd, distro)
+ else:
+ logger.debug("Changing password is not needed")
+ except Exception as e:
+ set_cust_error_status(
+ "Error applying password configuration",
+ str(e),
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ cust_cfg,
+ )
+ return False
+ return True
+
+
+def parse_cust_cfg(cfg_file):
+ return Config(ConfigFile(cfg_file))
+
+
+def get_cust_cfg_type(cust_cfg):
+ is_vmware_cust_cfg, is_raw_data_cust_cfg = False, False
+ if cust_cfg.meta_data_name:
+ is_raw_data_cust_cfg = True
+ logger.debug("raw cloudinit data cust cfg found")
+ else:
+ is_vmware_cust_cfg = True
+ logger.debug("vmware cust cfg found")
+ return (is_vmware_cust_cfg, is_raw_data_cust_cfg)
+
+
+def is_cust_plugin_available():
+ search_paths = (
+ "/usr/lib/vmware-tools",
+ "/usr/lib64/vmware-tools",
+ "/usr/lib/open-vm-tools",
+ "/usr/lib64/open-vm-tools",
+ "/usr/lib/x86_64-linux-gnu/open-vm-tools",
+ "/usr/lib/aarch64-linux-gnu/open-vm-tools",
+ )
+ cust_plugin = "libdeployPkgPlugin.so"
+ for path in search_paths:
+ cust_plugin_path = search_file(path, cust_plugin)
+ if cust_plugin_path:
+ logger.debug(
+ "Found the customization plugin at %s", cust_plugin_path
+ )
+ return True
+ return False
+
+
+def search_file(dirpath, filename):
+ if not dirpath or not filename:
+ return None
+
+ for root, _dirs, files in os.walk(dirpath):
+ if filename in files:
+ return os.path.join(root, filename)
+
+ return None
+
+
+def set_cust_error_status(prefix, error, event, cust_cfg):
+ """
+ Set customization status to the underlying VMware Virtualization Platform
+ """
+ util.logexc(logger, "%s: %s", prefix, error)
+ set_customization_status(GuestCustStateEnum.GUESTCUST_STATE_RUNNING, event)
+ set_gc_status(cust_cfg, prefix)
+
+
# vi: ts=4 expandtab
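
The marker-file helpers above (check_markers, check_marker_exists, setup_marker_files) gate the one-time "special customization" steps: they run only when a product marker id is configured and its marker file is absent, and a fresh marker is written after a successful run. A small self-contained sketch of that flow; the helper names here are illustrative, not the module's API.

    import os
    import tempfile

    def marker_path(marker_dir, marker_id):
        return os.path.join(marker_dir, ".markerfile-" + marker_id + ".txt")

    def needs_special_customization(marker_dir, marker_id):
        # Mirrors check_markers(): run only if an id is set and no marker exists.
        return bool(marker_id) and not os.path.exists(
            marker_path(marker_dir, marker_id)
        )

    def record_customization(marker_dir, marker_id):
        # Mirrors setup_marker_files(): drop stale markers, create the new one.
        for fname in os.listdir(marker_dir):
            if fname.startswith(".markerfile"):
                os.unlink(os.path.join(marker_dir, fname))
        open(marker_path(marker_dir, marker_id), "w").close()

    with tempfile.TemporaryDirectory() as data_dir:
        assert needs_special_customization(data_dir, "product-42")
        record_customization(data_dir, "product-42")
        assert not needs_special_customization(data_dir, "product-42")
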
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index de2d0eb0..836108d4 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -37,7 +37,9 @@ def get_metadata(url, timeout, retries, sec_between, agent, tmp_dir=None):
# Fetch the metadata
v1 = read_metadata(url, timeout, retries, sec_between, agent)
- return json.loads(v1)
+ metadata = json.loads(v1)
+ refactor_metadata(metadata)
+ return metadata
except (
NoDHCPLeaseError,
subp.ProcessExecutionError,
@@ -49,6 +51,16 @@ def get_metadata(url, timeout, retries, sec_between, agent, tmp_dir=None):
raise exception
+# Refactor metadata into acceptable format
+def refactor_metadata(metadata):
+ metadata["instance-id"] = metadata["instance-v2-id"]
+ metadata["local-hostname"] = metadata["hostname"]
+ region = metadata["region"]["regioncode"]
+ if "countrycode" in metadata["region"]:
+ region = metadata["region"]["countrycode"]
+ metadata["region"] = region.lower()
+
+
# Get interface list, sort, and clean
def get_interface_list():
ifaces = []
@@ -264,17 +276,17 @@ def generate_interface_additional_addresses(interface, netcfg):
# Make required adjustments to the network configs provided
-def add_interface_names(interfaces):
- for interface in interfaces:
- interface_name = get_interface_name(interface["mac"])
+def add_interface_names(netcfg):
+ for interface in netcfg["config"]:
+ if interface["type"] != "physical":
+ continue
+ interface_name = get_interface_name(interface["mac_address"])
if not interface_name:
raise RuntimeError(
"Interface: %s could not be found on the system"
- % interface["mac"]
+ % interface["mac_address"]
)
interface["name"] = interface_name
- return interfaces
-
# vi: ts=4 expandtab
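
For reference, the key remapping that the new refactor_metadata() above performs, applied to a made-up payload (all values are illustrative):

    metadata = {
        "instance-v2-id": "example-instance",
        "hostname": "example-host",
        "region": {"regioncode": "EWR", "countrycode": "US"},
    }
    # Same steps as refactor_metadata():
    metadata["instance-id"] = metadata["instance-v2-id"]
    metadata["local-hostname"] = metadata["hostname"]
    region = metadata["region"]["regioncode"]
    if "countrycode" in metadata["region"]:
        region = metadata["region"]["countrycode"]
    metadata["region"] = region.lower()

    assert metadata["region"] == "us"
    assert metadata["instance-id"] == "example-instance"
    assert metadata["local-hostname"] == "example-host"
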
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index a1a6964f..eb5c9f64 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -8,6 +8,7 @@
import os
import pwd
+from typing import List, Sequence, Tuple
from cloudinit import log as logging
from cloudinit import util
@@ -499,18 +500,18 @@ class SshdConfigLine:
return v
-def parse_ssh_config(fname):
+def parse_ssh_config(fname) -> List[SshdConfigLine]:
if not os.path.isfile(fname):
return []
return parse_ssh_config_lines(util.load_file(fname).splitlines())
-def parse_ssh_config_lines(lines):
+def parse_ssh_config_lines(lines) -> List[SshdConfigLine]:
# See: man sshd_config
# The file contains keyword-argument pairs, one per line.
# Lines starting with '#' and empty lines are interpreted as comments.
# Note: key-words are case-insensitive and arguments are case-sensitive
- ret = []
+ ret: List[SshdConfigLine] = []
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
@@ -554,11 +555,7 @@ def _includes_dconf(fname: str) -> bool:
return False
-def update_ssh_config(updates, fname=DEF_SSHD_CFG):
- """Read fname, and update if changes are necessary.
-
- @param updates: dictionary of desired values {Option: value}
- @return: boolean indicating if an update was done."""
+def _ensure_cloud_init_ssh_config_file(fname):
if _includes_dconf(fname):
if not os.path.isdir(f"{fname}.d"):
util.ensure_dir(f"{fname}.d", mode=0o755)
@@ -566,6 +563,15 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):
if not os.path.isfile(fname):
# Ensure root read-only:
util.ensure_file(fname, 0o600)
+ return fname
+
+
+def update_ssh_config(updates, fname=DEF_SSHD_CFG):
+ """Read fname, and update if changes are necessary.
+
+ @param updates: dictionary of desired values {Option: value}
+ @return: boolean indicating if an update was done."""
+ fname = _ensure_cloud_init_ssh_config_file(fname)
lines = parse_ssh_config(fname)
changed = update_ssh_config_lines(lines=lines, updates=updates)
if changed:
@@ -623,4 +629,17 @@ def update_ssh_config_lines(lines, updates):
return changed
+def append_ssh_config(lines: Sequence[Tuple[str, str]], fname=DEF_SSHD_CFG):
+ if not lines:
+ return
+ fname = _ensure_cloud_init_ssh_config_file(fname)
+ content = (f"{k} {v}" for k, v in lines)
+ util.write_file(
+ fname,
+ "\n".join(content) + "\n",
+ omode="ab",
+ preserve_mode=True,
+ )
+
+
# vi: ts=4 expandtab
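
A usage sketch of the new append_ssh_config() helper, assuming the cloudinit package is importable: each (keyword, value) pair becomes one "Keyword value" line appended to the managed sshd configuration file. The option names and path below are purely illustrative.

    from cloudinit import ssh_util

    # Appends "PasswordAuthentication yes" and "PermitRootLogin no" as two new
    # lines; an illustrative path is used here instead of the real sshd_config.
    ssh_util.append_ssh_config(
        [("PasswordAuthentication", "yes"), ("PermitRootLogin", "no")],
        fname="/tmp/example-sshd.conf",
    )
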
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 635a31e8..9494a0bf 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -43,7 +43,7 @@ def update_event_enabled(
datasource: sources.DataSource,
cfg: dict,
event_source_type: EventType,
- scope: EventScope = None,
+ scope: Optional[EventScope] = None,
) -> bool:
"""Determine if a particular EventType is enabled.
@@ -801,11 +801,10 @@ class Init:
# Run the handlers
self._do_handlers(user_data_msg, c_handlers_list, frequency)
- def _remove_top_level_network_key(self, cfg):
- """If network-config contains top level 'network' key, then remove it.
-
- Some providers of network configuration skip the top-level network
- key, so ensure both methods works.
+ def _get_network_key_contents(self, cfg) -> dict:
+ """
+ Network configuration can be passed as a dict under a "network" key, or
+ optionally at the top level. In both cases, return the config.
"""
if cfg and "network" in cfg:
return cfg["network"]
@@ -848,14 +847,14 @@ class Init:
cfg_source,
)
continue
- ncfg = self._remove_top_level_network_key(
- available_cfgs[cfg_source]
- )
+ ncfg = self._get_network_key_contents(available_cfgs[cfg_source])
if net.is_disabled_cfg(ncfg):
LOG.debug("network config disabled by %s", cfg_source)
return (None, cfg_source)
if ncfg:
return (ncfg, cfg_source)
+ if not self.cfg.get("network", True):
+ LOG.warning("Empty network config found")
return (
self.distro.generate_fallback_config(),
NetworkConfigSource.FALLBACK,
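
The renamed helper accepts network configuration either nested under a top-level "network" key or given directly; returning the config unchanged when no "network" key is present is inferred from its docstring. A tiny sketch of that equivalence, with illustrative dict contents:

    def get_network_key_contents(cfg):
        # Mirrors _get_network_key_contents(): unwrap "network" when present.
        if cfg and "network" in cfg:
            return cfg["network"]
        return cfg

    nested = {"network": {"version": 2, "ethernets": {"eth0": {"dhcp4": True}}}}
    flat = {"version": 2, "ethernets": {"eth0": {"dhcp4": True}}}
    assert get_network_key_contents(nested) == get_network_key_contents(flat)
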
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 3d4e4639..d6d0afa6 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -375,7 +375,7 @@ def _run_func_with_delay(
addr: str,
timeout: int,
event: threading.Event,
- delay: float = None,
+ delay: Optional[float] = None,
) -> Any:
"""Execute func with optional delay"""
if delay:
@@ -476,11 +476,11 @@ def wait_for_url(
max_wait=None,
timeout=None,
status_cb: Callable = LOG.debug, # some sources use different log levels
- headers_cb: Callable = None,
+ headers_cb: Optional[Callable] = None,
headers_redact=None,
sleep_time: int = 1,
- exception_cb: Callable = None,
- sleep_time_cb: Callable[[Any, int], int] = None,
+ exception_cb: Optional[Callable] = None,
+ sleep_time_cb: Optional[Callable[[Any, int], int]] = None,
request_method: str = "",
connect_synchronously: bool = True,
async_delay: float = 0.150,
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 51e4cd63..3336b23d 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -166,7 +166,6 @@ class UserDataProcessor:
# TODO(harlowja): Should this be happening, shouldn't
# the part header be modified and not the base?
_replace_header(base_msg, CONTENT_TYPE, ctype)
-
self._attach_part(append_msg, part)
def _attach_launch_index(self, msg):
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 078ce1c2..8ba3e2b6 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -35,10 +35,11 @@ from base64 import b64decode, b64encode
from collections import deque, namedtuple
from errno import EACCES, ENOENT
from functools import lru_cache, total_ordering
+from pathlib import Path
from typing import Callable, Deque, Dict, List, TypeVar
from urllib import parse
-from cloudinit import importer
+from cloudinit import features, importer
from cloudinit import log as logging
from cloudinit import (
mergers,
@@ -639,11 +640,13 @@ def _get_variant(info):
"mariner",
"miraclelinux",
"openeuler",
+ "opencloudos",
"openmandriva",
"photon",
"rhel",
"rocky",
"suse",
+ "tencentos",
"virtuozzo",
):
variant = linux_dist
@@ -653,10 +656,12 @@ def _get_variant(info):
variant = "rhel"
elif linux_dist in (
"opensuse",
- "opensuse-tumbleweed",
"opensuse-leap",
- "sles",
+ "opensuse-microos",
+ "opensuse-tumbleweed",
"sle_hpc",
+ "sle-micro",
+ "sles",
):
variant = "suse"
else:
@@ -976,14 +981,17 @@ def load_yaml(blob, default=None, allowed=(dict,)):
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
if base.find("%s") >= 0:
- ud_url = base % ("user-data" + ext)
- vd_url = base % ("vendor-data" + ext)
- md_url = base % ("meta-data" + ext)
+ ud_url = base.replace("%s", "user-data" + ext)
+ vd_url = base.replace("%s", "vendor-data" + ext)
+ md_url = base.replace("%s", "meta-data" + ext)
else:
+ if features.NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH:
+ if base[-1] != "/" and parse.urlparse(base).query == "":
+ # Append fwd slash when no query string and no %s
+ base += "/"
ud_url = "%s%s%s" % (base, "user-data", ext)
vd_url = "%s%s%s" % (base, "vendor-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
-
md_resp = url_helper.read_file_or_url(
md_url, timeout=timeout, retries=retries
)
@@ -1784,12 +1792,41 @@ def json_dumps(data):
)
-def ensure_dir(path, mode=None):
+def get_non_exist_parent_dir(path):
+ """Get the last directory in a path that does not exist.
+
+    Example: when path=/usr/a/b and /usr/a does not exist but /usr does,
+ return /usr/a
+ """
+ p_path = os.path.dirname(path)
+ # Check if parent directory of path is root
+ if p_path == os.path.dirname(p_path):
+ return path
+ else:
+ if os.path.isdir(p_path):
+ return path
+ else:
+ return get_non_exist_parent_dir(p_path)
+
+
+def ensure_dir(path, mode=None, user=None, group=None):
if not os.path.isdir(path):
+        # Get the non-existent parent dir first, before dirs are created.
+ non_existed_parent_dir = get_non_exist_parent_dir(path)
# Make the dir and adjust the mode
with SeLinuxGuard(os.path.dirname(path), recursive=True):
os.makedirs(path)
chmod(path, mode)
+ # Change the ownership
+ if user or group:
+ chownbyname(non_existed_parent_dir, user, group)
+ # if path=/usr/a/b/c and non_existed_parent_dir=/usr,
+ # then sub_relative_dir=PosixPath('a/b/c')
+ sub_relative_dir = Path(path.split(non_existed_parent_dir)[1][1:])
+ sub_path = Path(non_existed_parent_dir)
+ for part in sub_relative_dir.parts:
+ sub_path = sub_path.joinpath(part)
+ chownbyname(sub_path, user, group)
else:
# Just adjust the mode
chmod(path, mode)
@@ -2126,6 +2163,8 @@ def write_file(
preserve_mode=False,
*,
ensure_dir_exists=True,
+ user=None,
+ group=None,
):
"""
Writes a file with the given content and sets the file mode as specified.
@@ -2140,6 +2179,8 @@ def write_file(
@param ensure_dir_exists: If True (the default), ensure that the directory
containing `filename` exists before writing to
the file.
+ @param user: The user to set on the file.
+ @param group: The group to set on the file.
"""
if preserve_mode:
@@ -2149,7 +2190,7 @@ def write_file(
pass
if ensure_dir_exists:
- ensure_dir(os.path.dirname(filename))
+ ensure_dir(os.path.dirname(filename), user=user, group=group)
if "b" in omode.lower():
content = encode_text(content)
write_type = "bytes"
@@ -2761,11 +2802,25 @@ def read_meminfo(meminfo="/proc/meminfo", raw=False):
def human2bytes(size):
"""Convert human string or integer to size in bytes
+
+ In the original implementation, SI prefixes parse to IEC values
+ (1KB=1024B). Later, support for parsing IEC prefixes was added,
+ also parsing to IEC values (1KiB=1024B). To maintain backwards
+ compatibility for the long-used implementation, no fix is provided for SI
+ prefixes (to make 1KB=1000B may now violate user expectations).
+
+ Future prospective callers of this function should consider implementing a
+ new function with more standard expectations (1KB=1000B and 1KiB=1024B)
+
+ Examples:
10M => 10485760
- .5G => 536870912
+ 10MB => 10485760
+ 10MiB => 10485760
"""
size_in = size
- if size.endswith("B"):
+ if size.endswith("iB"):
+ size = size[:-2]
+ elif size.endswith("B"):
size = size[:-1]
mpliers = {"B": 1, "K": 2**10, "M": 2**20, "G": 2**30, "T": 2**40}
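
A worked illustration of the suffix handling documented above: with the added "iB" stripping, "10M", "10MB" and "10MiB" all parse to 10 * 2**20 bytes. The sketch mirrors the docstring examples rather than calling cloudinit.util.human2bytes directly.

    mpliers = {"B": 1, "K": 2**10, "M": 2**20, "G": 2**30, "T": 2**40}

    def human2bytes_sketch(size):
        if size.endswith("iB"):
            size = size[:-2]
        elif size.endswith("B"):
            size = size[:-1]
        mplier = "B"
        if size and size[-1] in mpliers:
            mplier = size[-1]
            size = size[:-1]
        return int(float(size) * mpliers[mplier])

    assert human2bytes_sketch("10M") == human2bytes_sketch("10MB") == 10485760
    assert human2bytes_sketch("10MiB") == 10485760
    assert human2bytes_sketch(".5G") == 536870912
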
diff --git a/cloudinit/version.py b/cloudinit/version.py
index b9b42af3..fd83ebf6 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "22.4"
+__VERSION__ = "23.1"
_PACKAGED_VERSION = "@@PACKAGED_VERSION@@"
FEATURES = [
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 0f234a7d..7238c102 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -3,7 +3,8 @@
# The top level settings are used as module
# and base configuration.
{% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %}
-{% set is_rhel = variant in ["rhel", "centos"] %}
+{% set is_rhel = variant in ["almalinux", "centos", "cloudlinux", "eurolinux",
+ "miraclelinux", "rhel", "rocky", "virtuozzo" ] %}
{% if is_bsd %}
syslog_fix_perms: root:wheel
{% elif variant in ["suse"] %}
@@ -34,8 +35,7 @@ disable_root: false
disable_root: true
{% endif %}
-{% if variant in ["almalinux", "alpine", "amazon", "cloudlinux", "eurolinux",
- "fedora", "miraclelinux", "openEuler", "openmandriva", "rocky", "virtuozzo"] or is_rhel %}
+{% if variant in ["alpine", "amazon", "fedora", "openEuler", "OpenCloudOS", "openmandriva", "photon", "TencentOS"] or is_rhel %}
{% if is_rhel %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2']
{% else %}
@@ -44,10 +44,12 @@ mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% if variant == "amazon" %}
resize_rootfs: noblock
{% endif %}
+{% if variant not in ["photon"] %}
resize_rootfs_tmp: /dev
ssh_pwauth: false
-
{% endif %}
+{% endif %}
+
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
@@ -197,9 +199,9 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
-{% if variant in ["almalinux", "alpine", "amazon", "arch", "cloudlinux", "debian",
- "eurolinux", "fedora", "freebsd", "gentoo", "netbsd", "mariner", "miraclelinux", "openbsd", "openEuler",
- "openmandriva", "photon", "rocky", "suse", "ubuntu", "virtuozzo"] or is_rhel %}
+{% if variant in ["alpine", "amazon", "arch", "debian", "fedora", "freebsd",
+ "gentoo", "netbsd", "mariner", "openbsd", "openEuler", "OpenCloudOS",
+ "openmandriva", "photon", "suse", "TencentOS", "ubuntu"] or is_rhel %}
distro: {{ variant }}
{% elif variant in ["dragonfly"] %}
distro: dragonflybsd
@@ -252,15 +254,15 @@ system_info:
primary: http://ports.ubuntu.com/ubuntu-ports
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
-{% elif variant in ["almalinux", "alpine", "amazon", "arch", "cloudlinux", "eurolinux",
- "fedora", "gentoo", "miraclelinux", "openEuler", "openmandriva", "rocky", "suse", "virtuozzo"] or is_rhel %}
+{% elif variant in ["alpine", "amazon", "arch", "fedora",
+ "gentoo", "openEuler", "OpenCloudOS", "openmandriva", "suse", "TencentOS"] or is_rhel %}
# Default user name + that default users groups (if added/used)
default_user:
{% if variant == "amazon" %}
name: ec2-user
lock_passwd: True
gecos: EC2 Default User
-{% elif is_rhel %}
+{% elif variant in ["rhel", "centos"] %}
name: cloud-user
lock_passwd: true
gecos: Cloud User
diff --git a/doc-requirements.txt b/doc-requirements.txt
index 6f48062e..ee2126b0 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -4,3 +4,5 @@ m2r2
pyyaml
sphinx
sphinx-design
+sphinx-copybutton
+sphinx-notfound-page
diff --git a/doc/examples/cloud-config-ansible-controller.txt b/doc/examples/cloud-config-ansible-controller.txt
index 389f8f88..da2f58f0 100644
--- a/doc/examples/cloud-config-ansible-controller.txt
+++ b/doc/examples/cloud-config-ansible-controller.txt
@@ -4,8 +4,8 @@
# This example installs a playbook repository from a remote private repository
# and then runs two of the plays.
-packages_update: true
-packages_upgrade: true
+package_update: true
+package_upgrade: true
packages:
- git
- python3-pip
diff --git a/doc/examples/cloud-config-ansible-pull.txt b/doc/examples/cloud-config-ansible-pull.txt
index 62acc5a9..73985772 100644
--- a/doc/examples/cloud-config-ansible-pull.txt
+++ b/doc/examples/cloud-config-ansible-pull.txt
@@ -1,6 +1,6 @@
#cloud-config
-packages_update: true
-packages_upgrade: true
+package_update: true
+package_upgrade: true
# if you're already installing other packages, you may
# wish to manually install ansible to avoid multiple calls
diff --git a/doc/examples/cloud-config-ca-certs.txt b/doc/examples/cloud-config-ca-certs.txt
index 9f7beb05..427465d4 100644
--- a/doc/examples/cloud-config-ca-certs.txt
+++ b/doc/examples/cloud-config-ca-certs.txt
@@ -8,11 +8,12 @@
# It should be passed as user-data when starting the instance.
ca_certs:
- # If present and set to True, the 'remove_defaults' parameter will remove
- # all the default trusted CA certificates that are normally shipped with
- # Ubuntu.
- # This is mainly for paranoid admins - most users will not need this
- # functionality.
+    # If present and set to True, the 'remove_defaults' parameter will disable
+    # all the trusted CA certificates normally shipped with Alpine, Debian or
+    # Ubuntu; on RedHat, this action will delete those certificates instead.
+ # This is mainly for very security-sensitive use cases - most users will not
+ # need this functionality.
remove_defaults: true
# If present, the 'trusted' parameter should contain a certificate (or list
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 7a8c4284..9b5df6b0 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -16,6 +16,11 @@ datasource:
- http://169.254.169.254:80
- http://instance-data:8773
+ OpenStack:
+ # The default list of metadata services to check for OpenStack.
+ metadata_urls:
+ - http://169.254.169.254
+
MAAS:
timeout : 50
max_wait : 120
@@ -31,7 +36,7 @@ datasource:
# default seedfrom is None
# if found, then it should contain a url with:
# <url>/user-data and <url>/meta-data
- # seedfrom: http://my.example.com/i-abcde
+ # seedfrom: http://my.example.com/i-abcde/
seedfrom: None
# fs_label: the label on filesystems to be searched for NoCloud source
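
The trailing slash added to the example seedfrom URL pairs with the NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH behaviour added to read_seeded() earlier in this diff: when the base URL contains no "%s" placeholder and no query string, a "/" is appended before "user-data", "vendor-data" and "meta-data" are joined on. A sketch of the resulting URLs; the host name is illustrative.

    from urllib import parse

    def seed_urls(base, ext=""):
        # Mirrors the read_seeded() logic shown earlier in this diff.
        names = ("user-data", "vendor-data", "meta-data")
        if "%s" in base:
            return [base.replace("%s", name + ext) for name in names]
        if base[-1] != "/" and parse.urlparse(base).query == "":
            base += "/"
        return ["%s%s%s" % (base, name, ext) for name in names]

    assert seed_urls("http://my.example.com/i-abcde") == [
        "http://my.example.com/i-abcde/user-data",
        "http://my.example.com/i-abcde/vendor-data",
        "http://my.example.com/i-abcde/meta-data",
    ]
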
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index cdd176d3..3c8fc36c 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -1,5 +1,5 @@
#cloud-config
-# Cloud-init supports the creation of simple partition tables and file systems
+# Cloud-init supports the creation of simple partition tables and filesystems
# on devices.
# Default disk definitions for AWS
@@ -147,13 +147,13 @@ disk_setup:
# If layout is set to "true" and overwrite is set to "false",
# it will skip partitioning the device without a failure.
#
-# overwrite=<BOOL>: This describes whether to ride with saftey's on and
+# overwrite=<BOOL>: This describes whether to ride with the safeties on and
# everything holstered.
#
# 'false' is the default, which means that:
# 1. The device will be checked for a partition table
-# 2. The device will be checked for a file system
-# 3. If either a partition of file system is found, then
+# 2. The device will be checked for a filesystem
+#    3. If either a partition or filesystem is found, then
# the operation will be _skipped_.
#
# 'true' is cowboy mode. There are no checks and things are
@@ -161,10 +161,10 @@ disk_setup:
# really, really don't want to do.
#
#
-# fs_setup: Setup the file system
-# -------------------------------
+# fs_setup: Setup the filesystem
+# ------------------------------
#
-# fs_setup describes the how the file systems are supposed to look.
+# fs_setup describes how the filesystems are supposed to look.
fs_setup:
- label: ephemeral0
@@ -189,10 +189,10 @@ fs_setup:
# replace_fs: <FS_TYPE>
#
# Where:
-# <LABEL>: The file system label to be used. If set to None, no label is
+# <LABEL>: The filesystem label to be used. If set to None, no label is
# used.
#
-# <FS_TYPE>: The file system type. It is assumed that the there
+# <FS_TYPE>: The filesystem type. It is assumed that there
# will be a "mkfs.<FS_TYPE>" that behaves likes "mkfs". On a standard
# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
# and vfat by default.
@@ -212,15 +212,15 @@ fs_setup:
# The valid options are:
# "auto|any": tell cloud-init not to care whether there is a partition
# or not. Auto will use the first partition that does not contain a
-# file system already. In the absence of a partition table, it will
+# filesystem already. In the absence of a partition table, it will
# put it directly on the disk.
#
-# "auto": If a file system that matches the specification in terms of
-# label, type and device, then cloud-init will skip the creation of
-# the file system.
+# "auto": If a filesystem that matches the specification in terms of
+# label, filesystem and device, then cloud-init will skip the creation
+# of the filesystem.
#
-# "any": If a file system that matches the file system type and device,
-# then cloud-init will skip the creation of the file system.
+# "any": If a filesystem that matches the filesystem type and device,
+# then cloud-init will skip the creation of the filesystem.
#
# Devices are selected based on first-detected, starting with partitions
# and then the raw disk. Consider the following:
@@ -231,7 +231,7 @@ fs_setup:
# |-xvdb3 btrfs test
# \-xvdb4 ext4 test
#
-# If you ask for 'auto', label of 'test, and file system of 'ext4'
+# If you ask for 'auto', label of 'test', and filesystem of 'ext4'
# then cloud-init will select the 2nd partition, even though there
# is a partition match at the 4th partition.
#
@@ -243,25 +243,25 @@ fs_setup:
#
# In general, if you have a specific partition configuration in mind,
# you should define either the device or the partition number. 'auto'
-# and 'any' are specifically intended for formatting ephemeral storage or
-# for simple schemes.
+# and 'any' are specifically intended for formatting ephemeral storage
+# or for simple schemes.
#
-# "none": Put the file system directly on the device.
+# "none": Put the filesystem directly on the device.
#
# <NUM>: where NUM is the actual partition number.
#
# <OVERWRITE>: Defines whether or not to overwrite any existing
# filesystem.
#
-# "true": Indiscriminately destroy any pre-existing file system. Use at
+# "true": Indiscriminately destroy any pre-existing filesystem. Use at
# your own peril.
#
-# "false": If an existing file system exists, skip the creation.
+# "false": If an existing filesystem exists, skip the creation.
#
# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that
-# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
+# instructs cloud-init to replace a filesystem of <FS_TYPE>. NOTE:
# unless you define a label, this requires the use of the 'any' partition
# directive.
#
-# Behavior Caveat: The default behavior is to _check_ if the file system exists.
-# If a file system matches the specification, then the operation is a no-op.
+# Behavior Caveat: The default behavior is to _check_ if the filesystem exists.
+# If a filesystem matches the specification, then the operation is a no-op.
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index a549a444..a4103a7e 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -18,7 +18,7 @@ sys.path.insert(0, os.path.abspath("."))
# General information about the project.
project = "cloud-init"
-copyright = "2022, Canonical Ltd."
+copyright = "Canonical Ltd."
# -- General configuration ----------------------------------------------------
@@ -29,6 +29,8 @@ needs_sphinx = "4.0"
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"m2r2",
+ "notfound.extension",
+ "sphinx_copybutton",
"sphinx_design",
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
@@ -58,6 +60,12 @@ exclude_patterns = []
# output. They are ignored by default.
show_authors = False
+# Sphinx-copybutton config options: 1) prompt to be stripped from copied code.
+# 2) Set to copy all lines (not just prompt lines) to ensure multiline snippets
+# can be copied even if they don't contain an EOF line.
+copybutton_prompt_text = "$ "
+copybutton_only_copy_prompt_lines = False
+
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
@@ -72,6 +80,21 @@ html_theme_options = {
"dark_logo": "logo-dark-mode.png",
}
+html_extra_path = ["googleaf254801a5285c31.html"]
+
# Make sure the target is unique
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
+
+# Sphinx-notfound-page config options: title and body for the 404 page.
+notfound_body = (
+ "<h1>Page not found</h1><p>Sorry we missed you! Our docs have had a"
+ " remodel and some deprecated links have changed.</p><p>"
+ "<a href='https://canonical-cloud-init.readthedocs-hosted.com'>Back to our"
+ " homepage now hosted at"
+ " https://canonical-cloud-init.readthedocs-hosted.com</a></p>"
+)
+notfound_context = {
+ "title": "Page not found",
+ "body": notfound_body,
+}
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/development/code_review.rst
index 20c81eac..5684a509 100644
--- a/doc/rtd/topics/code_review.rst
+++ b/doc/rtd/development/code_review.rst
@@ -1,11 +1,13 @@
-*******************
-Code Review Process
+.. _code_review_process:
+
+Code review process
*******************
In order to manage incoming pull requests effectively, and provide
-timely feedback and/or acceptance this document serves as a guideline
-for the review process and outlines the expectations for those
+timely feedback and/or acceptance, this document serves as a guideline
+for the review process. It outlines the expectations for those
submitting code to the project as well as those reviewing the code.
+
Code is reviewed for acceptance by at least one core team member (later
referred to as committers), but comments and suggestions from others
are encouraged and welcome.
@@ -13,156 +15,154 @@ are encouraged and welcome.
The process is intended to provide timely and actionable feedback for
any submission.
-Asking For Help
+Asking for help
===============
-cloud-init contributors, potential contributors, community members and
-users are encouraged to ask for any help that they need. If you have
+``cloud-init`` contributors, potential contributors, community members and
+users are encouraged to ask for any help that they need. If you have
questions about the code review process, or at any point during the
code review process, these are the available avenues:
-* if you have an open Pull Request, comment on that pull request
-* join the ``#cloud-init`` channel on the Libera IRC network and ask
- away
-* send an email to the cloud-init mailing list,
- cloud-init@lists.launchpad.net
+* if you have an open Pull Request, comment on that pull request,
+* join the ``#cloud-init`` `channel on the Libera IRC`_ network,
+* send an email to the cloud-init mailing list: ::
-These are listed in rough order of preference, but use whichever of
-them you are most comfortable with.
+ cloud-init@lists.launchpad.net
+
+These are listed in order of preference, but please use whichever of them you
+are most comfortable with.
Goals
=====
This process has the following goals:
-* Ensure code reviews occur in a timely fashion and provide actionable
+* To ensure code reviews occur in a timely fashion and provide actionable
feedback if changes are desired.
-* Ensure the minimization of ancillary problems to increase the
- efficiency for those reviewing the submitted code
+* To ensure the minimisation of ancillary problems to increase the
+ efficiency for those reviewing the submitted code.
-Role Definitions
+Role definitions
================
-Any code review process will have (at least) two involved parties. For
+Any code review process will have (at least) two involved parties. For
our purposes, these parties are referred to as **Proposer** and
-**Reviewer**. (We also have the **Committer** role which is a special
-case of the **Reviewer** role.) The terms are defined here (and the
-use of the singular form is not meant to imply that they refer to a
-single person):
+**Reviewer**. We also have the **Committer** role which is a special
+case of the **Reviewer** role.
+
+The terms are defined here (and the use of the singular form is not meant to
+imply that they refer to a single person):
-Proposer
+**Proposer**
The person proposing a pull request (hereafter known as a PR).
-Reviewer
+**Reviewer**
A person who is reviewing a PR.
-Committer
- A cloud-init core developer (i.e. a person who has permission to
- merge PRs into **main**).
+**Committer**
+ A cloud-init core developer (i.e., a person who has permission to
+ merge PRs into ``main``).
-Prerequisites For Landing Pull Requests
+Prerequisites for landing pull requests
=======================================
-Before a PR can be landed into **main**, the following conditions *must*
+Before a PR can be landed into ``main``, the following conditions *must*
be met:
* the CLA has been signed by the **Proposer** (or is covered by an
- entity-level CLA signature)
-* all required status checks are passing
-* at least one "Approve" review from a **Committer**
-* no "Request changes" reviews from any **Committer**
+ entity-level CLA signature),
+* all required status checks are passing,
+* at least one "Approve" review has been received from a **Committer**, and
+* no "Request changes" reviews from any **Committer** are outstanding.
The following conditions *should* be met:
-* any Python functions/methods/classes have docstrings added/updated
-* any changes to config module behaviour are captured in the
- documentation of the config module
-* any Python code added has corresponding unit tests
-* no "Request changes" reviews from any **Reviewer**
+* any Python functions/methods/classes have docstrings added/updated,
+* any changes to config module behaviour are captured in the documentation of
+ the config module,
+* any Python code added has corresponding unit tests, and
+* no "Request changes" reviews from any **Reviewer** are outstanding.
These conditions can be relaxed at the discretion of the
-**Committers** on a case-by-case basis. Generally, for accountability,
+**Committers** on a case-by-case basis. Generally, for accountability,
this should not be the decision of a single **Committer**, and the
decision should be documented in comments on the PR.
(To take a specific example, the ``cc_phone_home`` module had no tests
-at the time `PR #237
-<https://github.com/canonical/cloud-init/pull/237>`_ was submitted, so
-the **Proposer** was not expected to write a full set of tests for
-their minor modification, but they were expected to update the config
-module docs.)
+at the time `PR #237`_ was submitted, so the **Proposer** was not expected to
+write a full set of tests for their minor modification, but they were expected
+to update the config module docs.)
-Non-Committer Reviews
+Non-Committer reviews
=====================
-Reviews from non-**Committers** are *always* welcome. Please feel
-empowered to review PRs and leave your thoughts and comments on any
-submitted PRs, regardless of the **Proposer**.
+Reviews from non-**Committers** are *always* welcome. Please feel empowered to
+review PRs and leave your thoughts and comments on any submitted PRs,
+regardless of the **Proposer**.
-Much of the below process is written in terms of the **Committers**.
-This is not intended to reflect that reviews should only come from that
-group, but acknowledges that we are ultimately responsible for
-maintaining the standards of the codebase. It would be entirely
-reasonable (and very welcome) for a **Reviewer** to only examine part
-of a PR, but it would not be appropriate for a **Committer** to merge a
-PR without full scrutiny.
+Much of the below process is written in terms of the **Committers**. This is
+not intended to reflect that reviews should only come from that group, but
+rather an acknowledgement that we are ultimately responsible for maintaining
+the standards of the codebase. It would be entirely reasonable (and very
+welcome) for a **Reviewer** to only examine part of a PR, but it would not be
+appropriate for a **Committer** to merge a PR without full scrutiny.
-Opening Phase
+Opening phase
=============
-In this phase, the **Proposer** is responsible for opening a pull
-request and meeting the prerequisites laid out above.
+In this phase, the **Proposer** is responsible for opening a pull request and
+meeting the prerequisites laid out above.
If they need help understanding the prerequisites, or help meeting the
-prerequisites, then they can (and should!) ask for help. See the
+prerequisites, then they can (and should!) ask for help. See the
`Asking For Help`_ section above for the ways to do that.
These are the steps that comprise the opening phase:
-1. The **Proposer** opens PR
+1. The **Proposer** opens a PR
-2. CI runs automatically, and if
+2. CI runs automatically, and if:
- CI fails
- The **Proposer** is expected to fix CI failures. If the
+ CI fails:
+ The **Proposer** is expected to fix CI failures. If the
**Proposer** doesn't understand the nature of the failures they
are seeing, they should comment in the PR to request assistance,
or use another way of `Asking For Help`_.
(Note that if assistance is not requested, the **Committers**
will assume that the **Proposer** is working on addressing the
- failures themselves. If you require assistance, please do ask
+ failures themselves. If you require assistance, please do ask
for help!)
- CI passes
- Move on to the `Review Phase`_.
+ CI passes:
+ Move on to the `Review phase`_.
-Review Phase
+Review phase
============
In this phase, the **Proposer** and the **Reviewers** will iterate
-together to, hopefully, get the PR merged into the cloud-init codebase.
+together to, hopefully, get the PR merged into the ``cloud-init`` codebase.
There are three potential outcomes: merged, rejected permanently, and
-temporarily closed. (The first two are covered in this section; see
-`Inactive Pull Requests`_ for details about temporary closure.)
+temporarily closed. The first two are covered in this section; see
+`Inactive Pull Requests`_ for details about temporary closure.
-(In the below, when the verbs "merge" or "squash merge" are used, they
+In this section, when the verbs "merge" or "squash merge" are used, they
should be understood to mean "squash merged using the GitHub UI", which
-is the only way that changes can land in cloud-init's **main** branch.)
+is the only way that changes can land in ``cloud-init``'s ``main`` branch.
These are the steps that comprise the review phase:
-1. **The Committers** assign a **Committer** to the PR
+1. **The Committers** assign a **Committer** to the PR:
- This **Committer** is expected to shepherd the PR to completion (and
- merge it, if that is the outcome reached). This means that they
+ This **Committer** is expected to shepherd the PR to completion (and to
+ merge it, if that is the outcome reached). This means that they
will perform an initial review, and monitor the PR to ensure that
- the **Proposer** is receiving any assistance that they require. The
+ the **Proposer** is receiving any assistance that they require. The
**Committers** will perform this assignment on a daily basis.
This assignment is intended to ensure that the **Proposer** has a
- clear point of contact with a cloud-init core developer, and that
- they get timely feedback after submitting a PR. It *is not*
+ clear point of contact with a ``cloud-init`` core developer, and that
+ they get timely feedback after submitting a PR. It *is not*
intended to preclude reviews from any other **Reviewers**, nor to
imply that the **Committer** has ownership over the review process.
@@ -170,33 +170,33 @@ These are the steps that comprise the review phase:
a PR to another **Reviewer** if they think that they would be better
suited.
- (Note that, in GitHub terms, this is setting an Assignee, not
+ (Note that, in GitHub terms, this is setting an `Assignee`, not
requesting a review.)
2. That **Committer** performs an initial review of the PR, resulting
in one of the following:
- Approve
+ Approve:
If the submitted PR meets all of the `Prerequisites for
Landing Pull Requests`_ and passes code review, then the
**Committer** will squash merge immediately.
There may be circumstances where a PR should not be merged
- immediately. The ``wip`` label will be applied to PRs for which
- this is true. Only **Committers** are able to apply labels to
+ immediately. The :guilabel:`wip` label will be applied to PRs for which
+ this is true. Only **Committers** are able to apply labels to
PRs, so anyone who believes that this label should be applied to a
PR should request its application in a comment on the PR.
The review process is **DONE**.
- Approve (with nits)
- If the **Proposer** submits their PR with "Allow edits from
- maintainer" enabled, and the only changes the **Committer**
+ Approve (with nits):
+ If the **Proposer** submits their PR with :guilabel:`"Allow edits from
+ maintainer"` enabled, and the only changes the **Committer**
requests are minor "nits", the **Committer** can push fixes for
- those nits and *immediately* squash merge. If the **Committer**
+ those nits and *immediately* squash merge. If the **Committer**
does not wish to fix these nits but believes they should block a
- straight-up Approve, then their review should be "Needs Changes"
- instead.
+ straightforward :guilabel:`Approve`, then their review should be "Needs
+ Changes" instead.
A nit is understood to be something like a minor style issue or a
spelling error, generally confined to a single line of code.
@@ -205,29 +205,30 @@ These are the steps that comprise the review phase:
is a nit, they should not treat it as a nit.
(If a **Proposer** wants to opt-out of this, then they should
- uncheck "Allow edits from maintainer" when submitting their PR.)
+ uncheck :guilabel:`"Allow edits from maintainer"` when submitting their
+ PR.)
The review process is **DONE**.
- Outright rejection
+ Outright rejection:
The **Committer** will close the PR, with useful messaging for the
**Proposer** as to why this has happened.
This is reserved for cases where the proposed change is completely
- unfit for landing, and there is no reasonable path forward. This
+ unfit for landing, and there is no reasonable path forward. This
should only be used sparingly, as there are very few cases where
proposals are completely unfit.
If a different approach to the same problem is planned, it should
- be submitted as a separate PR. The **Committer** should include
+ be submitted as a separate PR. The **Committer** should include
this information in their message when the PR is closed.
The review process is **DONE**.
- Needs Changes
+ Needs Changes:
The **Committer** will give the **Proposer** a clear idea of what
- is required for an Approve vote or, for more complex PRs, what the
- next steps towards an Approve vote are.
+ is required for an :guilabel:`Approve` vote or, for more complex PRs,
+ what the next steps towards an :guilabel:`Approve` vote are.
The **Proposer** will ask questions if they don't understand, or
disagree with, the **Committer**'s review comments.
@@ -236,21 +237,25 @@ These are the steps that comprise the review phase:
review comments.
Once the review comments are addressed (as well as, potentially,
- in the interim), CI will run. If CI fails, the **Proposer** is
- expected to fix CI failures. If CI passes, the **Proposer**
+ in the interim), CI will run. If CI fails, the **Proposer** is
+ expected to fix CI failures. If CI passes, the **Proposer**
should indicate that the PR is ready for re-review (by @ing the
assigned reviewer), effectively moving back to the start of this
section.
-Inactive Pull Requests
+Inactive pull requests
======================
PRs will be temporarily closed if they have been waiting on
-**Proposer** action for a certain amount of time without activity. A
+**Proposer** action for a certain amount of time without activity. A
PR will be marked as stale (with an explanatory comment) after 14 days
-of inactivity. It will be closed after a further 7 days of inactivity.
+of inactivity. It will be closed after a further 7 days of inactivity.
These closes are not considered permanent, and the closing message
should reflect this for the **Proposer**. However, if a PR is reopened,
it should effectively enter the `Opening phase`_ again, as it may
need some work done to get CI passing again.
+
+.. LINKS:
+.. _channel on the Libera IRC: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
+.. _PR #237: https://github.com/canonical/cloud-init/pull/237
diff --git a/doc/rtd/topics/contributing.rst b/doc/rtd/development/contributing.rst
index b9aee867..b1cd2f37 100644
--- a/doc/rtd/topics/contributing.rst
+++ b/doc/rtd/development/contributing.rst
@@ -1,2 +1 @@
.. include:: ../../../CONTRIBUTING.rst
-.. vi: textwidth=79
diff --git a/doc/rtd/development/debugging.rst b/doc/rtd/development/debugging.rst
new file mode 100644
index 00000000..b0a0da35
--- /dev/null
+++ b/doc/rtd/development/debugging.rst
@@ -0,0 +1,325 @@
+.. _debugging:
+
+Debugging ``cloud-init``
+************************
+
+Overview
+========
+
+This topic will discuss general approaches for testing and debugging
+``cloud-init`` on deployed instances.
+
+.. _boot_time_analysis:
+
+Boot time analysis
+==================
+
+:command:`cloud-init analyze`
+-----------------------------
+
+Occasionally, instances don't appear as performant as we would like, and
+``cloud-init`` includes a simple facility to inspect which operations took
+the longest during boot and setup.
+
+The script :file:`/usr/bin/cloud-init` has an analysis sub-command,
+:command:`analyze`, which parses any :file:`cloud-init.log` file into formatted
+and sorted events. It allows for detailed analysis of the most costly
+``cloud-init`` operations, and helps to determine the "long pole" in
+``cloud-init`` configuration and setup. These subcommands default to reading
+:file:`/var/log/cloud-init.log`.
+
+:command:`analyze show`
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Parse and organise :file:`cloud-init.log` events by stage, and include
+sub-stage granularity with time delta reports.
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze show -i my-cloud-init.log
+
+Example output:
+
+.. code-block:: shell-session
+
+ -- Boot Record 01 --
+ The total time elapsed since completing an event is printed after the "@"
+ character.
+ The time the event takes is printed after the "+" character.
+
+ Starting stage: modules-config
+ |`->config-snap_config ran successfully @05.47700s +00.00100s
+ |`->config-ssh-import-id ran successfully @05.47800s +00.00200s
+ |`->config-locale ran successfully @05.48000s +00.00100s
+ ...
+
+
+:command:`analyze dump`
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Parse :file:`cloud-init.log` into event records and return a list of
+dictionaries that can be consumed for other reporting needs.
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze dump -i my-cloud-init.log
+
+Example output:
+
+.. code-block::
+
+ [
+ {
+ "description": "running config modules",
+ "event_type": "start",
+ "name": "modules-config",
+ "origin": "cloudinit",
+ "timestamp": 1510807493.0
+ },...
+
+:command:`analyze blame`
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Parse :file:`cloud-init.log` into event records and sort them based on the
+highest time cost for a quick assessment of areas of ``cloud-init`` that may
+need improvement.
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze blame -i my-cloud-init.log
+
+Example output:
+
+.. code-block::
+
+ -- Boot Record 11 --
+ 00.01300s (modules-final/config-scripts-per-boot)
+ 00.00400s (modules-final/config-final-message)
+ 00.00100s (modules-final/config-rightscale_userdata)
+ ...
+
+:command:`analyze boot`
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Make subprocess calls to the system in order to gather relevant
+pre-``cloud-init`` timestamps, such as the kernel start, kernel finish boot,
+and ``cloud-init`` start.
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze boot
+
+Example output:
+
+.. code-block::
+
+ -- Most Recent Boot Record --
+ Kernel Started at: 2019-06-13 15:59:55.809385
+ Kernel ended boot at: 2019-06-13 16:00:00.944740
+ Kernel time to boot (seconds): 5.135355
+ Cloud-init start: 2019-06-13 16:00:05.738396
+ Time between Kernel boot and Cloud-init start (seconds): 4.793656
+
+Analyze quickstart - LXC
+------------------------
+
+To quickly obtain a ``cloud-init`` log, try using :command:`lxc` on any
+Ubuntu system:
+
+.. code-block:: shell-session
+
+ $ lxc init ubuntu-daily:focal x1
+ $ lxc start x1
+ $ # Take lxc's cloud-init.log and pipe it to the analyzer
+ $ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i -
+ $ lxc file pull x1/var/log/cloud-init.log - | \
+ python3 -m cloudinit.analyze dump -i -
+
+
+Analyze quickstart - KVM
+------------------------
+
+To quickly analyze a KVM ``cloud-init`` log:
+
+1. Download the current cloud image
+
+.. code-block:: shell-session
+
+ $ wget https://cloud-images.ubuntu.com/daily/server/focal/current/focal-server-cloudimg-amd64.img
+
+2. Create a snapshot image to preserve the original cloud image
+
+.. code-block:: shell-session
+
+ $ qemu-img create -b focal-server-cloudimg-amd64.img -f qcow2 \
+ test-cloudinit.qcow2
+
+3. Create a seed image with metadata using :command:`cloud-localds`
+
+.. code-block:: shell-session
+
+ $ cat > user-data <<EOF
+ #cloud-config
+ password: passw0rd
+ chpasswd: { expire: False }
+ EOF
+ $ cloud-localds my-seed.img user-data
+
+4. Launch your modified VM
+
+.. code-block:: shell-session
+
+ $ kvm -m 512 -net nic -net user -redir tcp:2222::22 \
+ -drive file=test-cloudinit.qcow2,if=virtio,format=qcow2 \
+ -drive file=my-seed.img,if=virtio,format=raw
+
+5. Analyze the boot (:command:`blame`, :command:`dump`, :command:`show`)
+
+.. code-block:: shell-session
+
+ $ ssh -p 2222 ubuntu@localhost 'cat /var/log/cloud-init.log' | \
+ cloud-init analyze blame -i -
+
+
+Running single cloud-config modules
+===================================
+
+The :command:`cloud-init single` subcommand is not called by the init system.
+It can be called manually to
+load the configured datasource and run a single cloud-config module once, using
+the cached user data and metadata after the instance has booted. Each
+cloud-config module has a module ``FREQUENCY`` configured: ``PER_INSTANCE``,
+``PER_BOOT``, ``PER_ONCE`` or ``PER_ALWAYS``. When a module is run by
+``cloud-init``, it stores a semaphore file in
+:file:`/var/lib/cloud/instance/sem/config_<module_name>.<frequency>` which
+marks when the module last successfully ran. Presence of this semaphore file
+prevents a module from running again if it has already been run. To ensure that
+a module is run again, the desired frequency can be overridden via the
+command line:
+
+.. code-block:: shell-session
+
+ $ sudo cloud-init single --name cc_ssh --frequency always
+
+Example output:
+
+.. code-block::
+
+ ...
+ Generating public/private ed25519 key pair
+ ...
+
+Inspect :file:`cloud-init.log` for output of what operations were performed as
+a result.
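+
+To see which modules have already run on the current instance, the semaphore
+directory described above can be listed directly (``sudo`` may be required):
+
+.. code-block:: shell-session
+
+    $ sudo ls /var/lib/cloud/instance/sem/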
+
+.. _proposed_sru_testing:
+
+Stable Release Updates (SRU) testing for ``cloud-init``
+=======================================================
+
+Once an Ubuntu release is stable (i.e. after it is released), updates for it
+must follow a special procedure called a "Stable Release Update" (`SRU`_).
+
+The ``cloud-init`` project has a specific process it follows when validating
+a ``cloud-init`` SRU, documented in the `CloudinitUpdates`_ wiki page.
+
+Generally an SRU test of ``cloud-init`` performs the following:
+
+ * Install a pre-release version of ``cloud-init`` from the **-proposed** APT
+ pocket (e.g., **bionic-proposed**).
+ * Upgrade ``cloud-init`` and attempt a clean run of ``cloud-init`` to assert
+ that the new version works properly on the specific platform and Ubuntu
+ series.
+ * Check for tracebacks or errors in behaviour.
+
+Manual SRU verification procedure
+---------------------------------
+
+Below are steps to manually test a pre-release version of ``cloud-init``
+from **-proposed**.
+
+.. note::
+ For each Ubuntu SRU, the Ubuntu Server team manually validates the new
+ version of ``cloud-init`` on these platforms: **Amazon EC2, Azure, GCE,
+ OpenStack, Oracle, Softlayer (IBM), LXD, KVM**
+
+1. Launch a VM on your favorite platform, providing this cloud-config
+   user data and replacing ``<YOUR_LAUNCHPAD_USERNAME>`` with your username:
+
+.. code-block:: yaml
+
+ ## template: jinja
+ #cloud-config
+ ssh_import_id: [<YOUR_LAUNCHPAD_USERNAME>]
+ hostname: SRU-worked-{{v1.cloud_name}}
+
+2. Wait for current ``cloud-init`` to complete, replace ``<YOUR_VM_IP>`` with
+ the IP address of the VM that you launched in step 1. Be sure to make a
+ note of the datasource ``cloud-init`` detected in ``--long`` output. You
+ will need this during step 5, where you will use it to confirm the same
+ datasource is detected after the upgrade:
+
+.. code-block:: bash
+
+   $ CI_VM_IP=<YOUR_VM_IP>
+ $ ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long
+
+3. Set up the **-proposed** pocket on your VM and upgrade to the **-proposed**
+ ``cloud-init``. To do this, create the following bash script, which will
+ add the **-proposed** pocket to APT's sources and install ``cloud-init``
+ from that pocket:
+
+.. code-block:: bash
+
+ cat > setup_proposed.sh <<EOF
+   #!/bin/bash
+ mirror=http://archive.ubuntu.com/ubuntu
+ echo deb \$mirror \$(lsb_release -sc)-proposed main | tee \
+ /etc/apt/sources.list.d/proposed.list
+ apt-get update -q
+ apt-get install -qy cloud-init
+ EOF
+
+.. code-block:: shell-session
+
+ $ scp setup_proposed.sh ubuntu@$CI_VM_IP:.
+ $ ssh ubuntu@$CI_VM_IP -- sudo bash setup_proposed.sh
+
+4. Change hostname, clean ``cloud-init``'s state, and reboot to run
+ ``cloud-init`` from scratch:
+
+.. code-block:: shell-session
+
+ $ ssh ubuntu@$CI_VM_IP -- sudo hostname something-else
+ $ ssh ubuntu@$CI_VM_IP -- sudo cloud-init clean --logs --reboot
+
+5. Validate **-proposed** ``cloud-init`` came up without error. First, we block
+ until ``cloud-init`` completes, then verify from ``--long`` that the
+   datasource is the same as the one noted in step 2. Errors will show up
+ in ``--long``:
+
+.. code-block:: shell-session
+
+ $ ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long
+
+Make sure the hostname was set properly to ``SRU-worked-<cloud name>``:
+
+.. code-block:: shell-session
+
+ $ ssh ubuntu@$CI_VM_IP -- hostname
+
+Then, check for any errors or warnings in ``cloud-init`` logs. If successful,
+this will produce no output:
+
+.. code-block:: shell-session
+
+ $ ssh ubuntu@$CI_VM_IP -- grep Trace "/var/log/cloud-init*"
+
+6. If you encounter an error during SRU testing:
+
+ * Create a `new cloud-init bug`_ reporting the version of ``cloud-init``
+ affected
+ * Ping upstream ``cloud-init`` on Libera's `#cloud-init IRC channel`_
+
+.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates
+.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates
+.. _new cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
+.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/development/dir_layout.rst
index d4606ac5..56627fb8 100644
--- a/doc/rtd/topics/dir_layout.rst
+++ b/doc/rtd/development/dir_layout.rst
@@ -1,9 +1,10 @@
-****************
+.. _dir_layout:
+
Directory layout
****************
-Cloud-init's directory structure is somewhat different from a regular
-application::
+``Cloud-init``'s directory structure is somewhat different from a regular
+application: ::
/var/lib/cloud/
- data/
@@ -34,15 +35,15 @@ application::
``/var/lib/cloud``
- The main directory containing the cloud-init specific subdirectories.
- It is typically located at ``/var/lib`` but there are certain configuration
- scenarios where this can be altered.
+ The main directory containing the ``cloud-init``-specific subdirectories.
+ It is typically located at :file:`/var/lib` but there are certain
+ configuration scenarios where this can be altered.
.. TODO: expand this section
``data/``
- Contains information related to instance ids, datasources and hostnames of
+ Contains information related to instance IDs, datasources and hostnames of
the previous and current instance if they are different. These can be
examined as needed to determine any information related to a previous boot
(if applicable).
@@ -50,21 +51,21 @@ application::
``handlers/``
Custom ``part-handlers`` code is written out here. Files that end up here are
- written out with in the scheme of ``part-handler-XYZ`` where ``XYZ`` is the
- handler number (the first handler found starts at 0).
+ written out within the scheme of ``part-handler-XYZ`` where ``XYZ`` is the
+ handler number (the first handler found starts at ``0``).
``instance``
A symlink to the current ``instances/`` subdirectory that points to the
- currently active instance (which is active is dependent on the datasource
+ currently active instance (the active instance is dependent on the datasource
loaded).
``instances/``
All instances that were created using this image end up with instance
identifier subdirectories (and corresponding data for each instance). The
- currently active instance will be symlinked the ``instance`` symlink file
+ currently active instance will be symlinked to the ``instance`` symlink file
defined previously.
``scripts/``
@@ -74,12 +75,13 @@ application::
``seed/``
- Contains seeded data files: meta-data, network-config, user-data, vendor-data
+ Contains seeded data files: :file:`meta-data`, :file:`network-config`,
+ :file:`user-data`, :file:`vendor-data`.
``sem/``
- Cloud-init has a concept of a module semaphore, which basically consists
+ ``Cloud-init`` has a concept of a module semaphore, which basically consists
of the module name and its frequency. These files are used to ensure a module
- is only ran `per-once`, `per-instance`, `per-always`. This folder contains
- semaphore `files` which are only supposed to run `per-once` (not tied to the
- instance id).
+ is only run "per-once", "per-instance", or "per-always". This folder contains
+ semaphore :file:`files` which are only supposed to run "per-once" (not tied
+ to the instance ID).
diff --git a/doc/rtd/development/docs.rst b/doc/rtd/development/docs.rst
new file mode 100644
index 00000000..30d66559
--- /dev/null
+++ b/doc/rtd/development/docs.rst
@@ -0,0 +1,136 @@
+.. _docs:
+
+Documentation
+*************
+
+These docs are hosted on `Read the Docs`_. The following will explain how to
+contribute to, and build, these docs locally.
+
+The documentation is primarily written in reStructuredText, with some pages
+written in standard Markdown.
+
+Building
+========
+
+There is a makefile target to build the documentation for you:
+
+.. code-block:: shell-session
+
+ $ tox -e doc
+
+This will do two things:
+
+- Build the documentation using Sphinx.
+- Run doc8 against the documentation source code.
+
+Once built, the HTML files will be viewable in :file:`doc/rtd_html`. Use your
+web browser to open :file:`index.html` to view and navigate the site.
+
+Style guide
+===========
+
+Language
+--------
+
+Where possible, text should be written in UK English. However, discretion and
+common sense can both be applied. For example, where text refers to code
+elements that exist in US English, the spelling of these elements should not
+be changed to UK English.
+
+Headings
+--------
+
+In reStructuredText, headings are denoted using symbols to underline the text.
+The headings used across the documentation use the following hierarchy:
+
+- ``#####``: Top level header (reserved for the main index page)
+- ``*****``: Title header (used once at the top of a new page)
+- ``=====``: Section headers
+- ``-----``: Subsection headers
+- ``^^^^^``: Sub-subsection headers
+- ``"""""``: Paragraphs
+
+The length of the underline must be at least as long as the title itself.
+
+Ensure that you do not skip header levels when creating your document
+structure, i.e., that a section is followed by a subsection, and not a
+sub-subsection.
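+
+For example, the heading structure of a typical new page would look like
+this:
+
+.. code-block:: rst
+
+    Example page title
+    ******************
+
+    First section
+    =============
+
+    A subsection
+    ------------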
+
+Line length
+-----------
+
+Please keep the line lengths to a maximum of **79** characters. This ensures
+that the pages and tables do not get so wide that side scrolling is required.
+
+Anchor labels
+-------------
+
+Adding an anchor label at the top of the page allows for the page to be
+referenced by other pages. For example for the FAQ page this would be:
+
+.. code-block:: rst
+
+ .. _faq:
+
+ FAQ
+ ***
+
+When the reference is used in a document, the displayed text will be that of
+the next heading immediately following the label (so, FAQ in this example),
+unless specifically overridden.
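+
+For example, both of the following references point at the FAQ page, with the
+second overriding the displayed text:
+
+.. code-block:: rst
+
+    See the :ref:`faq` for more details.
+
+    See the :ref:`frequently asked questions <faq>` for more details.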
+
+If you use labels within a page to refer, for example, to a subsection, use a
+label that follows the format: ``[pagelabel]-[Section]``, e.g., for this
+"Anchor labels" section, something like ``_docs-Anchor:`` or ``_docs-Label:``.
+Using a consistent style will aid greatly when referencing from other pages.
+
+Links
+-----
+
+To aid in documentation maintenance and keeping links up-to-date, links should
+be presented in a single block at the end of the page.
+
+Where possible, use contextual text in your links to aid users with screen
+readers and other accessibility tools. For example, "check out our
+:ref:`documentation style guide<docs>`" is preferable to "click
+:ref:`here<docs>` for more".
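+
+For example, an external link can be referenced by name in the body text and
+defined once in a links block at the bottom of the page:
+
+.. code-block:: rst
+
+    See the `pycloudlib docs`_ for more information.
+
+    .. LINKS:
+    .. _pycloudlib docs: https://pycloudlib.readthedocs.io/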
+
+Code blocks
+-----------
+
+Our documentation uses the Sphinx extension "sphinx-copybutton", which creates
+a small button on the right-hand side of code blocks for users to copy the
+code snippets we provide.
+
+The copied code will strip out the prompt symbol (``$``) so that users can
+paste commands directly into their terminal. For user convenience, please
+ensure that code output is presented in a separate code block to the commands.
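+
+For example, in the page source this looks like the following (the output
+shown is illustrative only):
+
+.. code-block:: rst
+
+    .. code-block:: shell-session
+
+        $ cloud-init --version
+
+    Example output:
+
+    .. code-block::
+
+        /usr/bin/cloud-init 23.1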
+
+Vertical whitespace
+-------------------
+
+One newline between each section helps ensure readability of the documentation
+source code.
+
+Common words
+------------
+
+There are some common words that should follow specific usage:
+
+- ``cloud-init``: Always hyphenated. Follows sentence case, so only
+ capitalised at the start of a sentence (e.g., ``Cloud-init``).
+- ``metadata``, ``datasource``: One word.
+- ``user data``, ``vendor data``: Two words, not to be combined or hyphenated.
+
+Acronyms
+--------
+
+Acronyms are always capitalised (e.g., JSON, YAML, QEMU, LXD) in text.
+
+The first time an acronym is used on a page, it is best practice to introduce
+it by showing the expanded name followed by the acronym in parentheses. E.g.,
+Quick EMUlator (QEMU). If the acronym is very common, or you provide a link to
+a documentation page that provides such details, you will not need to do this.
+
+
+.. _Read the Docs: https://readthedocs.com/
diff --git a/doc/rtd/development/index.rst b/doc/rtd/development/index.rst
new file mode 100644
index 00000000..9768fab5
--- /dev/null
+++ b/doc/rtd/development/index.rst
@@ -0,0 +1,47 @@
+Development
+***********
+
+``Cloud-init`` is an open source project that warmly welcomes community
+projects, contributions, suggestions, fixes and constructive feedback. If you
+would like to contribute to ``cloud-init``, this set of documentation will help
+orient you with our processes.
+
+-----
+
+Contributing
+============
+
+.. toctree::
+ :maxdepth: 1
+
+ contributing.rst
+ module_creation.rst
+ code_review.rst
+ dir_layout.rst
+
+Debugging and reporting
+=======================
+
+.. toctree::
+ :maxdepth: 1
+
+ ../howto/bugs.rst
+ logging.rst
+ debugging.rst
+
+Testing
+=======
+
+.. toctree::
+ :maxdepth: 1
+
+ testing.rst
+ integration_tests.rst
+
+Documentation
+=============
+
+.. toctree::
+ :maxdepth: 1
+
+ docs.rst
diff --git a/doc/rtd/development/integration_tests.rst b/doc/rtd/development/integration_tests.rst
new file mode 100644
index 00000000..797cc5d0
--- /dev/null
+++ b/doc/rtd/development/integration_tests.rst
@@ -0,0 +1,214 @@
+.. _integration_tests:
+
+Integration testing
+*******************
+
+Overview
+=========
+
+Integration tests are written using ``pytest`` and are located at
+:file:`tests/integration_tests`. General design principles laid out in
+:ref:`testing` should be followed for integration tests.
+
+Setup is accomplished via a set of fixtures located in
+:file:`tests/integration_tests/conftest.py`.
+
+Test definition
+===============
+
+Tests are defined like any other ``pytest`` test. The ``user_data``
+mark can be used to supply the cloud-config user data. Platform-specific
+marks can be used to limit tests to particular platforms. The ``client``
+fixture can be used to interact with the launched test instance.
+
+See `Examples`_ section for examples.
+
+Test execution
+==============
+
+Test execution happens via ``pytest``. A ``tox`` definition exists to run
+integration tests. To run all integration tests, you would run:
+
+.. code-block:: bash
+
+ $ tox -e integration-tests
+
+``pytest`` arguments may also be passed. For example:
+
+.. code-block:: bash
+
+ $ tox -e integration-tests tests/integration_tests/modules/test_combined.py
+
+Configuration
+=============
+
+All possible configuration values are defined in
+`tests/integration_tests/integration_settings.py`_. Defaults can be overridden
+by supplying values in :file:`tests/integration_tests/user_settings.py` or by
+providing an environment variable of the same name prepended with
+``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting:
+
+.. code-block:: bash
+
+ CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
+
+Cloud interaction
+=================
+
+Cloud interaction happens via the `pycloudlib library`_. In order to run
+integration tests, pycloudlib must `first be configured`_.
+
+For a minimal setup using LXD, write the following to
+:file:`~/.config/pycloudlib.toml`:
+
+.. code-block:: toml
+
+ [lxd]
+
+Image selection
+===============
+
+Each integration testing run uses a single image as its basis. This
+image is configured using the ``OS_IMAGE`` variable; see
+`Configuration`_ for details of how configuration works.
+
+``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g.
+"focal"), or an image specification. If an Ubuntu series name is
+given, then the most recent image for that series on the target cloud
+will be used. For other use cases, an image specification is used.
+
+In its simplest form, an image specification can simply be a cloud's
+image ID (e.g., "ami-deadbeef", "ubuntu:focal"). In this case, the
+identified image will be used as the basis for this testing run.
+
+This has a drawback, however. As we do not know what OS or release is
+within the image, the integration testing framework will run *all*
+tests against the image in question. If it's a RHEL8 image, then we
+would expect Ubuntu-specific tests to fail (and vice versa).
+
+To address this, a full image specification can be given. This is of
+the form: ``<image_id>[::<os>[::<release>]]`` where ``image_id`` is a
+cloud's image ID, ``os`` is the OS name, and ``release`` is the OS
+release name. So, for example, Ubuntu 18.04 (Bionic Beaver) on LXD is
+``ubuntu:bionic::ubuntu::bionic`` or RHEL8 on Amazon is
+``ami-justanexample::rhel::8``. When a full specification is given,
+only tests which are intended for use on that OS and release will be
+executed.
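+
+For example, reusing the hypothetical image ID from above, a full
+specification can be supplied through the environment in the same way as any
+other setting:
+
+.. code-block:: bash
+
+    CLOUD_INIT_OS_IMAGE='ami-justanexample::rhel::8' \
+        pytest tests/integration_tests/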
+
+Image setup
+===========
+
+Image setup occurs once when a test session begins and is implemented
+via fixture. Image setup roughly follows these steps:
+
+* Launch an instance on the specified test platform.
+* Install the version of ``cloud-init`` under test.
+* Run :command:`cloud-init clean` on the instance so subsequent boots
+ resemble "out of the box" behaviour.
+* Take a snapshot of the instance to be used as a new image from
+ which new instances can be launched.
+
+Test setup
+==========
+
+Test setup occurs between image setup and test execution. Test setup
+is implemented via one of the ``client`` fixtures. When a ``client`` fixture
+is used, a test instance from which to run tests is launched prior to
+test execution, and then torn down after.
+
+Continuous integration
+======================
+
+A subset of the integration tests are run when a pull request
+is submitted on GitHub. The tests run on these continuous
+integration (CI) runs are given a ``pytest`` mark:
+
+.. code-block:: python
+
+ @pytest.mark.ci
+
+Most new tests should *not* use this mark, so be aware that having a
+successful CI run does not necessarily mean that your test passed
+successfully.
+
+Fixtures
+========
+
+Integration tests rely heavily on fixtures to do initial test setup.
+One or more of these fixtures will be used in almost every integration test.
+
+Details such as the cloud platform or initial image to use are determined
+via what is specified in the `Configuration`_.
+
+``client``
+----------
+
+The ``client`` fixture should be used for most test cases. It ensures:
+
+- All setup performed by `session_cloud`_ and `setup_image`_.
+- `Pytest marks`_ used during instance creation are obtained and applied.
+- The test instance is launched.
+- Test failure status is determined after test execution.
+- Logs are collected (if configured) after test execution.
+- The test instance is torn down after test execution.
+
+``module_client`` and ``class_client`` fixtures also exist for the
+purpose of running multiple tests against a single launched instance.
+They provide the exact same functionality as ``client``, but are
+scoped to the module or class respectively.
+
+``session_cloud``
+-----------------
+
+The ``session_cloud`` session-scoped fixture will provide an
+`IntegrationCloud`_ instance for the currently configured cloud. The fixture
+also ensures that any custom cloud session cleanup is performed.
+
+``setup_image``
+---------------
+
+The ``setup_image`` session-scoped fixture will create a new image to launch
+all further cloud instances during this test run. It ensures:
+
+- A cloud instance is launched on the configured platform.
+- The version of ``cloud-init`` under test is installed on the instance.
+- :command:`cloud-init clean --logs` is run on the instance.
+- A snapshot of the instance is taken to be used as the basis for
+ future instance launches.
+- The originally launched instance is torn down.
+- The custom created image is torn down after all tests finish.
+
+Examples
+--------
+
+A simple test case using the ``client`` fixture:
+
+.. code-block:: python
+
+ USER_DATA = """\
+ #cloud-config
+ bootcmd:
+ - echo 'hello!' > /var/tmp/hello.txt
+ """
+
+
+ @pytest.mark.user_data(USER_DATA)
+ def test_bootcmd(client):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Shellified 1 commands." in log
+ assert client.execute('cat /var/tmp/hello.txt').strip() == "hello!"
+
+Customizing the launch arguments before launching an instance manually:
+
+.. code-block:: python
+
+ def test_launch(session_cloud: IntegrationCloud, setup_image):
+ with session_cloud.launch(launch_kwargs={"wait": False}) as client:
+ client.instance.wait()
+ assert client.execute("echo hello world").strip() == "hello world"
+
+.. LINKS:
+.. _tests/integration_tests/integration_settings.py: https://github.com/canonical/cloud-init/blob/main/tests/integration_tests/integration_settings.py
+.. _pycloudlib library: https://pycloudlib.readthedocs.io/en/latest/index.html
+.. _first be configured: https://pycloudlib.readthedocs.io/en/latest/configuration.html#configuration
+.. _Pytest marks: https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/conftest.py#L220-L224
+.. _IntegrationCloud: https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/clouds.py#L102
diff --git a/doc/rtd/development/logging.rst b/doc/rtd/development/logging.rst
new file mode 100644
index 00000000..e99eef8b
--- /dev/null
+++ b/doc/rtd/development/logging.rst
@@ -0,0 +1,278 @@
+.. _logging:
+
+Logging
+*******
+
+``Cloud-init`` supports both local and remote logging configurable through
+multiple configurations:
+
+- Python's built-in logging configuration
+- ``Cloud-init``'s event reporting system
+- The ``cloud-init`` ``rsyslog`` module
+
+Python logging
+==============
+
+``Cloud-init`` uses the Python logging module, and can accept config for this
+module using the standard Python ``fileConfig`` format. ``Cloud-init`` looks
+for config for the logging module under the ``logcfg`` key.
+
+.. note::
+   The logging configuration is not YAML; it is Python ``fileConfig`` format,
+ and is passed through directly to the Python logging module. Please use
+ the correct syntax for a multi-line string in YAML.
+
+By default, ``cloud-init`` uses the logging configuration provided in
+:file:`/etc/cloud/cloud.cfg.d/05_logging.cfg`. The default Python logging
+configuration writes all ``cloud-init`` events with a priority of ``WARNING``
+or higher to console, and writes all events with a level of ``DEBUG`` or
+higher to :file:`/var/log/cloud-init.log` and via :file:`syslog`.
+
+Python's ``fileConfig`` format consists of sections with headings in the
+format ``[title]`` and key value pairs in each section. Configuration for
+Python logging must contain the sections ``[loggers]``, ``[handlers]``, and
+``[formatters]``, which name the entities of their respective types that will
+be defined. The section name for each defined logger, handler and formatter
+will start with its type, followed by an underscore (``_``) and the name of
+the entity. For example, if a logger was specified with the name ``log01``,
+config for the logger would be in the section ``[logger_log01]``.
+
+Logger config entries contain basic logging setup. They may specify a list of
+handlers to send logging events to as well as the lowest priority level of
+events to handle. A logger named ``root`` must be specified and its
+configuration (under ``[logger_root]``) must contain a level and a list of
+handlers. A level entry can be any of the following: ``DEBUG``, ``INFO``,
+``WARNING``, ``ERROR``, ``CRITICAL``, or ``NOTSET``. For the ``root`` logger
+the ``NOTSET`` option will allow all logging events to be recorded.
+
+Each configured handler must specify a class under Python's ``logging``
+package namespace. A handler may specify a message formatter to use, a
+priority level, and arguments for the handler class. Common handlers are
+``StreamHandler``, which handles stream redirects (i.e., logging to
+``stderr``), and ``FileHandler``, which outputs to a log file. The logging
+module also supports logging over network sockets, over HTTP, via SMTP, and
+additional complex configurations. For full details about the handlers
+available for Python logging, please see the documentation for
+`python logging handlers`_.
+
+Log messages are formatted using the ``logging.Formatter`` class, which is
+configured using ``formatter`` config entities. A default format of
+``%(message)s`` is given if no formatter configs are specified. Formatter
+config entities accept a format string which supports variable replacements.
+These may also accept a ``datefmt`` string which may be used to configure the
+timestamp used in the log messages. The format variables ``%(asctime)s``,
+``%(levelname)s`` and ``%(message)s`` are commonly used and represent the
+timestamp, the priority level of the event and the event message. For
+additional information on logging formatters see `python logging formatters`_.
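+
+For example, a formatter named ``simple`` using a custom format and date
+format could be defined as follows; a handler would then reference it with
+``formatter=simple`` (an illustrative sketch): ::
+
+    [formatters]
+    keys=simple
+    [formatter_simple]
+    format=%(asctime)s - %(levelname)s: %(message)s
+    datefmt=%Y-%m-%d %H:%M:%S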
+
+.. note::
+   By default, the format string used in the logging formatter is in Python's
+   old-style ``%s`` form. The ``str.format()`` and ``string.Template`` styles
+   can also be used, by setting the ``style`` parameter in the formatter
+   config to ``{`` or ``$`` respectively (in place of the default ``%``).
+
+A simple (but functional) Python logging configuration for ``cloud-init`` is
+below. It will log all messages of priority ``DEBUG`` or higher to both
+:file:`stderr` and :file:`/tmp/my.log` using a ``StreamHandler`` and a
+``FileHandler``, using the default format string ``%(message)s``: ::
+
+ logcfg: |
+ [loggers]
+ keys=root,cloudinit
+ [handlers]
+ keys=ch,cf
+ [formatters]
+ keys=
+ [logger_root]
+ level=DEBUG
+ handlers=
+ [logger_cloudinit]
+ level=DEBUG
+ qualname=cloudinit
+ handlers=ch,cf
+ [handler_ch]
+ class=StreamHandler
+ level=DEBUG
+ args=(sys.stderr,)
+ [handler_cf]
+ class=FileHandler
+ level=DEBUG
+ args=('/tmp/my.log',)
+
+For additional information about configuring Python's logging module, please
+see the documentation for `python logging config`_.
+
+.. _logging_command_output:
+
+Command output
+==============
+
+``Cloud-init`` can redirect its :file:`stdout` and :file:`stderr` based on
+config given under the ``output`` config key. The output of any commands run
+by ``cloud-init`` and any user or vendor scripts provided will also be
+included here. The ``output`` key accepts a dictionary for configuration.
+Output files may be specified individually for each stage (``init``,
+``config``, and ``final``), or a single key ``all`` may be used to specify
+output for all stages.
+
+The output for each stage may be specified as a dictionary of ``output`` and
+``error`` keys, for :file:`stdout` and :file:`stderr` respectively, as a tuple
+with :file:`stdout` first and :file:`stderr` second, or as a single string to
+use for both. The strings passed to all of these keys are handled by the
+system shell, so any form of redirection that can be used in bash is valid,
+including piping ``cloud-init``'s output to ``tee``, or ``logger``. If only a
+filename is provided, ``cloud-init`` will append its output to the file as
+though ``>>`` was specified.
+
+By default, ``cloud-init`` loads its output configuration from
+:file:`/etc/cloud/cloud.cfg.d/05_logging.cfg`. The default config directs both
+:file:`stdout` and :file:`stderr` from all ``cloud-init`` stages to
+:file:`/var/log/cloud-init-output.log`. The default config is given as: ::
+
+ output: { all: "| tee -a /var/log/cloud-init-output.log" }
+
+For a more complex example, the following configuration would output the init
+stage to :file:`/var/log/cloud-init.out` and :file:`/var/log/cloud-init.err`,
+for :file:`stdout` and :file:`stderr` respectively, replacing anything that
+was previously there. For the config stage, it would pipe both :file:`stdout`
+and :file:`stderr` through :command:`tee -a /var/log/cloud-config.log`. For
+the final stage it would append the output of :file:`stdout` and
+:file:`stderr` to :file:`/var/log/cloud-final.out` and
+:file:`/var/log/cloud-final.err` respectively. ::
+
+ output:
+ init:
+ output: "> /var/log/cloud-init.out"
+ error: "> /var/log/cloud-init.err"
+ config: "tee -a /var/log/cloud-config.log"
+ final:
+ - ">> /var/log/cloud-final.out"
+ - "/var/log/cloud-final.err"
+
+Event reporting
+===============
+
+``Cloud-init`` contains an eventing system that allows events to be emitted
+to a variety of destinations.
+
+Three configurations are available for reporting events:
+
+- ``webhook``: POST to a web server.
+- ``log``: Write to the ``cloud-init`` log at configurable log level.
+- ``stdout``: Print to :file:`stdout`.
+
+The default configuration is to emit events to the ``cloud-init`` log file
+at ``DEBUG`` level.
+
+Event reporting can be configured using the ``reporting`` key in
+``cloud-config`` user data.
+
+Configuration
+-------------
+
+``webhook``
+^^^^^^^^^^^
+
+.. code-block:: yaml
+
+ reporting:
+ <user-defined name>:
+ type: webhook
+ endpoint: <url>
+ timeout: <timeout in seconds>
+ retries: <number of retries>
+ consumer_key: <OAuth consumer key>
+ token_key: <OAuth token key>
+ token_secret: <OAuth token secret>
+ consumer_secret: <OAuth consumer secret>
+
+``endpoint`` is the only additional required key when specifying
+``type: webhook``.
+
+``log``
+^^^^^^^
+
+.. code-block:: yaml
+
+ reporting:
+ <user-defined name>:
+ type: log
+ level: <DEBUG|INFO|WARN|ERROR|FATAL>
+
+``level`` is optional and defaults to "DEBUG".
+
+``print``
+^^^^^^^^^
+
+.. code-block:: yaml
+
+ reporting:
+ <user-defined name>:
+ type: print
+
+
+Example
+^^^^^^^
+
+The following example shows configuration for all three reporting types:
+
+.. code-block:: yaml
+
+ #cloud-config
+ reporting:
+ webserver:
+ type: webhook
+ endpoint: "http://10.0.0.1:55555/asdf"
+ timeout: 5
+ retries: 3
+ consumer_key: <consumer_key>
+ token_key: <token_key>
+ token_secret: <token_secret>
+ consumer_secret: <consumer_secret>
+ info_log:
+ type: log
+ level: WARN
+ stdout:
+ type: print
+
+``rsyslog`` module
+==================
+
+``Cloud-init``'s ``cc_rsyslog`` module allows for fully customizable
+``rsyslog`` configuration under the ``rsyslog`` config key. The simplest way
+to use the ``rsyslog`` module is by specifying remote servers under the
+``remotes`` key in ``rsyslog`` config. The ``remotes`` key takes a dictionary
+where each key represents the name of an ``rsyslog`` server and each value is
+the configuration for that server. The format for server config is:
+
+ - optional filter for log messages (defaults to ``*.*``)
+ - optional leading ``@`` or ``@@``, indicating UDP and TCP respectively
+ (defaults to ``@``, for UDP)
+ - IPv4 or IPv6 hostname or address. IPv6 addresses must be in ``[::1]``
+ format (e.g., ``@[fd00::1]:514``)
+ - optional port number (defaults to ``514``)
+
+For example, to send logging to an ``rsyslog`` server named ``log_serv`` with
+address ``10.0.4.1``, using port number ``514``, over UDP, with all log
+messages enabled, one could use either of the following.
+
+With all options specified::
+
+ rsyslog:
+ remotes:
+ log_serv: "*.* @10.0.4.1:514"
+
+With defaults used::
+
+ rsyslog:
+ remotes:
+ log_serv: "10.0.4.1"
+
+
+For more information on ``rsyslog`` configuration, see
+:ref:`our module reference page<mod-rsyslog>`.
+
+.. LINKS:
+.. _python logging config: https://docs.python.org/3/library/logging.config.html#configuration-file-format
+.. _python logging handlers: https://docs.python.org/3/library/logging.handlers.html
+.. _python logging formatters: https://docs.python.org/3/library/logging.html#formatter-objects
diff --git a/doc/rtd/topics/module_creation.rst b/doc/rtd/development/module_creation.rst
index 56cadec4..cb46d7cf 100644
--- a/doc/rtd/topics/module_creation.rst
+++ b/doc/rtd/development/module_creation.rst
@@ -1,14 +1,15 @@
.. _module_creation:
-Module Creation
+Module creation
***************
-Much of cloud-init functionality is provided by :ref:`modules<modules>`.
+Much of ``cloud-init``'s functionality is provided by :ref:`modules<modules>`.
All modules follow a similar layout in order to provide consistent execution
and documentation. Use the example provided here to create a new module.
Example
=======
+
.. code-block:: python
# This file is part of cloud-init. See LICENSE file for license information.
@@ -50,32 +51,33 @@ Example
) -> None:
log.debug(f"Hi from module {name}")
+.. _module_creation-Guidelines:
Guidelines
==========
-* Create a new module in the ``cloudinit/config`` directory with a `cc_`
+* Create a new module in the :file:`cloudinit/config` directory with a ``cc_``
prefix.
* Your module must include a ``handle`` function. The arguments are:
- * ``name``: The module name specified in the configuration
+ * ``name``: The module name specified in the configuration.
* ``cfg``: A configuration object that is the result of the merging of
- cloud-config configuration with any datasource provided configuration.
+ cloud-config configuration with any datasource-provided configuration.
* ``cloud``: A cloud object that can be used to access various datasource
and paths for the given distro and data provided by the various datasource
instance types.
* ``log``: A logger object that can be used to log messages.
* ``args``: An argument list. This is usually empty and is only populated
if the module is called independently from the command line or if the
- module definition in ``/etc/cloud/cloud.cfg[.d]`` has been modified
+ module definition in :file:`/etc/cloud/cloud.cfg[.d]` has been modified
to pass arguments to this module.
* If your module introduces any new cloud-config keys, you must provide a
schema definition in `cloud-init-schema.json`_.
* The ``meta`` variable must exist and be of type `MetaSchema`_.
- * ``id``: The module id. In most cases this will be the filename without
- the `.py` extension.
+ * ``id``: The module ID. In most cases this will be the filename without
+ the ``.py`` extension.
* ``distros``: Defines the list of supported distros. It can contain
any of the values (not keys) defined in the `OSFAMILIES`_ map or
``[ALL_DISTROS]`` if there is no distro restriction.
@@ -84,11 +86,11 @@ Guidelines
* ``PER_ALWAYS``: Runs on every boot.
* ``ONCE``: Runs only on first boot.
* ``PER_INSTANCE``: Runs once per instance. When exactly this happens
- is dependent on the datasource but may triggered anytime there
+    is dependent on the datasource, but may be triggered any time there
would be a significant change to the instance metadata. An example
could be an instance being moved to a different subnet.
- * ``activate_by_schema_keys``: (Optional) List of cloud-config keys that will
+ * ``activate_by_schema_keys``: Optional list of cloud-config keys that will
    activate this module. When this list is not empty, the config module will
    be skipped unless one of the ``activate_by_schema_keys`` is present in
    merged cloud-config instance-data.
@@ -100,15 +102,15 @@ Guidelines
* ``__doc__ = get_meta_doc(meta)`` is necessary to provide proper module
documentation.
-Module Execution
+Module execution
================
In order for a module to be run, it must be defined in a module run section in
-``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d`` on the launched
+:file:`/etc/cloud/cloud.cfg` or :file:`/etc/cloud/cloud.cfg.d` on the launched
instance. The three module sections are
`cloud_init_modules`_, `cloud_config_modules`_, and `cloud_final_modules`_,
-corresponding to the :ref:`topics/boot:Network`, :ref:`topics/boot:Config`,
-and :ref:`topics/boot:Final` boot stages respectively.
+corresponding to the :ref:`Network<boot-Network>`, :ref:`Config<boot-Config>`,
+and :ref:`Final<boot-Final>` boot stages respectively.
Add your module to `cloud.cfg.tmpl`_ under the appropriate module section.
Each module gets run in the order listed, so ensure your module is defined
diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/development/testing.rst
index 4bcbba5b..b1b52467 100644
--- a/doc/rtd/topics/testing.rst
+++ b/doc/rtd/development/testing.rst
@@ -1,102 +1,103 @@
.. _testing:
-*******
Testing
*******
-cloud-init has both unit tests and integration tests. Unit tests can
-be found at ``tests/unittests``. Integration tests can be found at
-``tests/integration_tests``. Documentation specifically for integration
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ integration_tests.rst
+
+``Cloud-init`` has both unit tests and integration tests. Unit tests can
+be found at :file:`tests/unittests`. Integration tests can be found at
+:file:`tests/integration_tests`. Documentation specifically for integration
tests can be found on the :ref:`integration_tests` page, but
the guidelines specified below apply to both types of tests.
-cloud-init uses `pytest`_ to run its tests, and has tests written both
-as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests.
+``Cloud-init`` uses `pytest`_ to run its tests, and has tests written both
+as ``unittest.TestCase`` sub-classes and as un-subclassed ``pytest`` tests.
Guidelines
==========
The following guidelines should be followed.
-Test Layout
+Test layout
-----------
-* For ease of organisation and greater accessibility for developers not
- familiar with pytest, all cloud-init unit tests must be contained
- within test classes
-
- * Put another way, module-level test functions should not be used
+* For ease of organisation and greater accessibility for developers unfamiliar
+ with ``pytest``, all ``cloud-init`` unit tests must be contained within test
+ classes. In other words, module-level test functions should not be used.
-* As all tests are contained within classes, it is acceptable to mix
- ``TestCase`` test classes and pytest test classes within the same
- test file
+* Since all tests are contained within classes, it is acceptable to mix
+ ``TestCase`` test classes and ``pytest`` test classes within the same
+ test file.
- * These can be easily distinguished by their definition: pytest
- classes will not use inheritance at all (e.g.
+ * These can be easily distinguished by their definition: ``pytest``
+ classes will not use inheritance at all (e.g.,
`TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will
- subclass (indirectly) from ``TestCase`` (e.g.
- `TestPrependBaseCommands`_)
+ subclass (indirectly) from ``TestCase`` (e.g.,
+ `TestPrependBaseCommands`_).
-* Unit tests and integration tests are located under cloud-init/tests
+* Unit tests and integration tests are located under :file:`cloud-init/tests`.
* For consistency, unit test files should have a matching name and
- directory location under `tests/unittests`
+ directory location under :file:`tests/unittests`.
- * For example: the expected test file for code in
- `cloudinit/path/to/file.py` is
- `tests/unittests/path/to/test_file.py`
+ * E.g., the expected test file for code in :file:`cloudinit/path/to/file.py`
+ is :file:`tests/unittests/path/to/test_file.py`.
-
-``pytest`` Tests
+``pytest`` tests
----------------
-* pytest test classes should use `pytest fixtures`_ to share
- functionality instead of inheritance
+* ``pytest`` test classes should use `pytest fixtures`_ to share
+ functionality instead of inheritance.
-* pytest tests should use bare ``assert`` statements, to take advantage
- of pytest's `assertion introspection`_
+* ``pytest`` tests should use bare ``assert`` statements, to take advantage
+ of ``pytest``'s `assertion introspection`_.
-``pytest`` Version Gotchas
---------------------------
+``pytest`` version "gotchas"
+----------------------------
-As we still support Ubuntu 18.04 (Bionic Beaver), we can only use pytest
-features that are available in v3.3.2. This is an inexhaustive list of
+As we still support Ubuntu 18.04 (Bionic Beaver), we can only use ``pytest``
+features that are available in v3.3.2. This is an inexhaustive list of
ways in which this may catch you out:
-* Only the following built-in fixtures are available [#fixture-list]_:
-
- * ``cache``
- * ``capfd``
- * ``capfdbinary``
- * ``caplog``
- * ``capsys``
- * ``capsysbinary``
- * ``doctest_namespace``
- * ``monkeypatch``
- * ``pytestconfig``
- * ``record_xml_property``
- * ``recwarn``
- * ``tmpdir_factory``
- * ``tmpdir``
-
-Mocking and Assertions
+ * Only the following built-in fixtures are available [#fixture-list]_:
+
+ * ``cache``
+ * ``capfd``
+ * ``capfdbinary``
+ * ``caplog``
+ * ``capsys``
+ * ``capsysbinary``
+ * ``doctest_namespace``
+ * ``monkeypatch``
+ * ``pytestconfig``
+ * ``record_xml_property``
+ * ``recwarn``
+ * ``tmpdir_factory``
+ * ``tmpdir``
+
+Mocking and assertions
----------------------
* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
should start with ``m_`` to clearly distinguish them from non-mock
- variables
-
- * For example, ``m_readurl`` (which would be a mock for ``readurl``)
+ variables. For example, ``m_readurl`` (which would be a mock for
+ ``readurl``).
* The ``assert_*`` methods that are available on ``Mock`` and
``MagicMock`` objects should be avoided, as typos in these method
names may not raise ``AttributeError`` (and so can cause tests to
- silently pass). An important exception: if a ``Mock`` is
- `autospecced`_ then misspelled assertion methods *will* raise an
- ``AttributeError``, so these assertion methods may be used on
- autospecced ``Mock`` objects.
+ silently pass).
+
+ * **An important exception:** if a ``Mock`` is `autospecced`_ then
+ misspelled assertion methods *will* raise an ``AttributeError``, so these
+ assertion methods may be used on autospecced ``Mock`` objects.
- For non-autospecced ``Mock`` s, these substitutions can be used
+* For a non-autospecced ``Mock``, these substitutions can be used
(``m`` is assumed to be a ``Mock``):
* ``m.assert_any_call(*args, **kwargs)`` => ``assert
@@ -118,11 +119,10 @@ Mocking and Assertions
* When there are multiple patch calls in a test file for the module it
is testing, it may be desirable to capture the shared string prefix
- for these patch calls in a module-level variable. If used, such
- variables should be named ``M_PATH`` or, for datasource tests,
- ``DS_PATH``.
+ for these patch calls in a module-level variable. If used, such
+ variables should be named ``M_PATH`` or, for datasource tests, ``DS_PATH``.
-Test Argument Ordering
+Test argument ordering
----------------------
* Test arguments should be ordered as follows (see the sketch below):
@@ -131,10 +131,10 @@ Test Argument Ordering
partially applies its generated ``Mock`` object as the first
argument, so these arguments must go first.
* ``pytest.mark.parametrize`` arguments, in the order specified to
- the ``parametrize`` decorator. These arguments are also provided
+ the ``parametrize`` decorator. These arguments are also provided
by a decorator, so it's natural that they sit next to the
``mock.patch`` arguments.
- * Fixture arguments, alphabetically. These are not provided by a
+ * Fixture arguments, alphabetically. These are not provided by a
decorator, so they are last, and their order has no defined
meaning, so we default to alphabetical.
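+
+The following minimal sketch illustrates this ordering. The patched target
+(``os.path.exists``) and the names used are purely illustrative and are not
+taken from the ``cloud-init`` codebase:
+
+.. code-block:: python
+
+    from unittest import mock
+
+    import pytest
+
+
+    class TestArgumentOrdering:
+        @pytest.mark.parametrize("value,expected", [(1, "1"), (2, "2")])
+        @mock.patch("os.path.exists")
+        def test_ordering(self, m_exists, value, expected, caplog, tmpdir):
+            # Order: mock.patch args first, then parametrize args in
+            # decorator order, then fixtures alphabetically.
+            assert str(value) == expected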
@@ -152,6 +152,7 @@ Test Argument Ordering
in an ubuntu lxd container with python3-pytest installed.
+.. LINKS:
.. _pytest: https://docs.pytest.org/
.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15
diff --git a/doc/rtd/explanation/analyze.rst b/doc/rtd/explanation/analyze.rst
new file mode 100644
index 00000000..93961928
--- /dev/null
+++ b/doc/rtd/explanation/analyze.rst
@@ -0,0 +1,352 @@
+.. _analyze:
+
+Performance
+***********
+
+The :command:`analyze` subcommand was added to ``cloud-init`` to help analyze
+``cloud-init`` boot time performance. It is loosely based on
+``systemd-analyze``, where there are four subcommands:
+
+- :command:`blame`
+- :command:`show`
+- :command:`dump`
+- :command:`boot`
+
+Usage
+=====
+
+The :command:`analyze` command requires one of the four subcommands:
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze blame
+ $ cloud-init analyze show
+ $ cloud-init analyze dump
+ $ cloud-init analyze boot
+
+Availability
+============
+
+The :command:`analyze` subcommand is generally available across all
+distributions, with the exception of Gentoo and FreeBSD.
+
+Subcommands
+===========
+
+:command:`Blame`
+----------------
+
+The :command:`blame` subcommand matches :command:`systemd-analyze blame` where
+it prints, in descending order, the units that took the longest time to run.
+This output is highly useful for examining where ``cloud-init`` is spending
+its time.
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze blame
+
+Example output:
+
+.. code-block::
+
+ -- Boot Record 01 --
+ 00.80300s (init-network/config-growpart)
+ 00.64300s (init-network/config-resizefs)
+ 00.62100s (init-network/config-ssh)
+ 00.57300s (modules-config/config-grub-dpkg)
+ 00.40300s (init-local/search-NoCloud)
+ 00.38200s (init-network/config-users-groups)
+ 00.19800s (modules-config/config-apt-configure)
+ 00.03700s (modules-final/config-keys-to-console)
+ 00.02100s (init-network/config-update_etc_hosts)
+ 00.02100s (init-network/check-cache)
+ 00.00800s (modules-final/config-ssh-authkey-fingerprints)
+ 00.00800s (init-network/consume-vendor-data)
+ 00.00600s (modules-config/config-timezone)
+ 00.00500s (modules-final/config-final-message)
+ 00.00400s (init-network/consume-user-data)
+ 00.00400s (init-network/config-mounts)
+ 00.00400s (init-network/config-disk_setup)
+ 00.00400s (init-network/config-bootcmd)
+ 00.00400s (init-network/activate-datasource)
+ 00.00300s (init-network/config-update_hostname)
+ 00.00300s (init-network/config-set_hostname)
+ 00.00200s (modules-final/config-snappy)
+ 00.00200s (init-network/config-rsyslog)
+ 00.00200s (init-network/config-ca-certs)
+ 00.00200s (init-local/check-cache)
+ 00.00100s (modules-final/config-scripts-vendor)
+ 00.00100s (modules-final/config-scripts-per-once)
+ 00.00100s (modules-final/config-salt-minion)
+ 00.00100s (modules-final/config-rightscale_userdata)
+ 00.00100s (modules-final/config-phone-home)
+ 00.00100s (modules-final/config-package-update-upgrade-install)
+ 00.00100s (modules-final/config-fan)
+ 00.00100s (modules-config/config-ubuntu-advantage)
+ 00.00100s (modules-config/config-ssh-import-id)
+ 00.00100s (modules-config/config-snap)
+ 00.00100s (modules-config/config-set-passwords)
+ 00.00100s (modules-config/config-runcmd)
+ 00.00100s (modules-config/config-locale)
+ 00.00100s (modules-config/config-byobu)
+ 00.00100s (modules-config/config-apt-pipelining)
+ 00.00100s (init-network/config-write-files)
+ 00.00100s (init-network/config-seed_random)
+ 00.00100s (init-network/config-migrator)
+ 00.00000s (modules-final/config-ubuntu-drivers)
+ 00.00000s (modules-final/config-scripts-user)
+ 00.00000s (modules-final/config-scripts-per-instance)
+ 00.00000s (modules-final/config-scripts-per-boot)
+ 00.00000s (modules-final/config-puppet)
+ 00.00000s (modules-final/config-power-state-change)
+ 00.00000s (modules-final/config-mcollective)
+ 00.00000s (modules-final/config-lxd)
+ 00.00000s (modules-final/config-landscape)
+ 00.00000s (modules-final/config-chef)
+ 00.00000s (modules-config/config-snap_config)
+ 00.00000s (modules-config/config-ntp)
+ 00.00000s (modules-config/config-disable-ec2-metadata)
+ 00.00000s (init-network/setup-datasource)
+
+ 1 boot records analyzed
+
+:command:`Show`
+---------------
+
+The :command:`show` subcommand is similar to
+:command:`systemd-analyze critical-chain`, which prints a list of units, the
+time they started and how long they took. ``Cloud-init`` has five
+:ref:`boot stages<boot_stages>`, and within each stage a number of modules may
+run depending on configuration. :command:`cloud-init analyze show` will, for
+each boot, print this information and a summary of the total time.
+
+The following is an abbreviated example of the :command:`show` subcommand:
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze show
+
+Example output:
+
+.. code-block:: shell-session
+
+ -- Boot Record 01 --
+ The total time elapsed since completing an event is printed after the "@" character.
+ The time the event takes is printed after the "+" character.
+
+ Starting stage: init-local
+ |``->no cache found @00.01700s +00.00200s
+ |`->found local data from DataSourceNoCloud @00.11000s +00.40300s
+ Finished stage: (init-local) 00.94200 seconds
+
+ Starting stage: init-network
+ |`->restored from cache with run check: DataSourceNoCloud [seed=/dev/sr0][dsmode=net] @04.79500s +00.02100s
+ |`->setting up datasource @04.88900s +00.00000s
+ |`->reading and applying user-data @04.90100s +00.00400s
+ |`->reading and applying vendor-data @04.90500s +00.00800s
+ |`->activating datasource @04.95200s +00.00400s
+ Finished stage: (init-network) 02.72100 seconds
+
+ Starting stage: modules-config
+ |`->config-snap ran successfully @15.43100s +00.00100s
+ ...
+ |`->config-runcmd ran successfully @16.22300s +00.00100s
+ |`->config-byobu ran successfully @16.23400s +00.00100s
+ Finished stage: (modules-config) 00.83500 seconds
+
+ Starting stage: modules-final
+ |`->config-snappy ran successfully @16.87400s +00.00200s
+ |`->config-package-update-upgrade-install ran successfully @16.87600s +00.00100s
+ ...
+ |`->config-final-message ran successfully @16.93700s +00.00500s
+ |`->config-power-state-change ran successfully @16.94300s +00.00000s
+ Finished stage: (modules-final) 00.10300 seconds
+
+ Total Time: 4.60100 seconds
+
+ 1 boot records analyzed
+
+If additional boot records are detected then they are printed out from oldest
+to newest.
+
+:command:`Dump`
+---------------
+
+The :command:`dump` subcommand simply dumps the ``cloud-init`` logs that the
+:command:`analyze` module is performing its analysis on, and returns a list of
+dictionaries that can be consumed for other reporting needs. Each element in
+the list is a boot entry.
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze dump
+
+Example output:
+
+.. code-block::
+
+ [
+ {
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": 1567057578.037
+ },
+ {
+ "description": "attempting to read from cache [check]",
+ "event_type": "start",
+ "name": "init-local/check-cache",
+ "origin": "cloudinit",
+ "timestamp": 1567057578.054
+ },
+ {
+ "description": "no cache found",
+ "event_type": "finish",
+ "name": "init-local/check-cache",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1567057578.056
+ },
+ {
+ "description": "searching for local data from DataSourceNoCloud",
+ "event_type": "start",
+ "name": "init-local/search-NoCloud",
+ "origin": "cloudinit",
+ "timestamp": 1567057578.147
+ },
+ {
+ "description": "found local data from DataSourceNoCloud",
+ "event_type": "finish",
+ "name": "init-local/search-NoCloud",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1567057578.55
+ },
+ {
+ "description": "searching for local datasources",
+ "event_type": "finish",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1567057578.979
+ },
+ {
+ "description": "searching for network datasources",
+ "event_type": "start",
+ "name": "init-network",
+ "origin": "cloudinit",
+ "timestamp": 1567057582.814
+ },
+ {
+ "description": "attempting to read from cache [trust]",
+ "event_type": "start",
+ "name": "init-network/check-cache",
+ "origin": "cloudinit",
+ "timestamp": 1567057582.832
+ },
+ ...
+ {
+ "description": "config-power-state-change ran successfully",
+ "event_type": "finish",
+ "name": "modules-final/config-power-state-change",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1567057594.98
+ },
+ {
+ "description": "running modules for final",
+ "event_type": "finish",
+ "name": "modules-final",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1567057594.982
+ }
+ ]
+
+
+:command:`Boot`
+---------------
+
+The :command:`boot` subcommand prints out kernel-related timestamps that are
+not included in any of the ``cloud-init`` logs. There are three different
+timestamps that are presented to the user:
+
+- ``kernel start``
+- ``kernel finish boot``
+- ``cloud-init start``
+
+This subcommand was added to provide visibility into the parts of the boot
+process that ``cloud-init`` does not control, to aid in debugging performance
+issues related to ``cloud-init`` startup, and in tracking regressions.
+
+.. code-block:: shell-session
+
+ $ cloud-init analyze boot
+
+Example output:
+
+.. code-block::
+
+ -- Most Recent Boot Record --
+ Kernel Started at: 2019-08-29 01:35:37.753790
+ Kernel ended boot at: 2019-08-29 01:35:38.807407
+ Kernel time to boot (seconds): 1.053617000579834
+ Cloud-init activated by systemd at: 2019-08-29 01:35:43.992460
+ Time between Kernel end boot and Cloud-init activation (seconds): 5.185053110122681
+ Cloud-init start: 2019-08-29 08:35:45.867000
+ successful
+
+Timestamp gathering
+-------------------
+
+The following boot-related timestamps are gathered on demand when
+:command:`cloud-init analyze boot` runs:
+
+ - Kernel start, gathered from system uptime
+ - Kernel finishing initialisation, from the systemd
+   ``UserspaceTimestampMonotonic`` property
+ - ``Cloud-init`` activation, from the ``InactiveExitTimestamp`` property
+   of the ``cloud-init-local`` systemd unit
+
+To gather the necessary timestamps using systemd, run the following command
+to obtain the ``UserspaceTimestamp``:
+
+.. code-block:: shell-session
+
+ $ systemctl show -p UserspaceTimestampMonotonic
+
+Example output:
+
+.. code-block::
+
+ UserspaceTimestampMonotonic=989279
+
+The ``UserspaceTimestamp`` tracks when the init system starts, which is used
+as an indicator of the kernel finishing initialisation.
+
+Running the following command will gather the ``InactiveExitTimestamp``:
+
+.. code-block:: shell-session
+
+ $ systemctl show cloud-init-local -p InactiveExitTimestampMonotonic
+
+Example output:
+
+.. code-block::
+
+ InactiveExitTimestampMonotonic=4493126
+
+The ``InactiveExitTimestamp`` tracks when a particular systemd unit
+transitions from the `Inactive` to `Active` state, which can be used to mark
+the beginning of systemd's activation of ``cloud-init``.
+
+Currently this only works for distros that use systemd as the init system.
+We will be expanding support for other distros in the future, and this
+document will be updated accordingly.
+
+If systemd is not present on the system, ``dmesg`` is used to attempt to find
+an event that logs the beginning of the init system. However, with this method
+only the first two timestamps are able to be found; ``dmesg`` does not monitor
+userspace processes, so no ``cloud-init`` start timestamps are emitted --
+unlike when using systemd.
diff --git a/doc/rtd/explanation/boot.rst b/doc/rtd/explanation/boot.rst
new file mode 100644
index 00000000..42ccbc87
--- /dev/null
+++ b/doc/rtd/explanation/boot.rst
@@ -0,0 +1,263 @@
+.. _boot_stages:
+
+Boot stages
+***********
+
+To be able to provide the functionality that it does, ``cloud-init`` must be
+integrated into the boot in a fairly controlled way. There are five
+stages to boot:
+
+1. Generator
+2. Local
+3. Network
+4. Config
+5. Final
+
+.. _boot-Generator:
+
+Generator
+=========
+
+When booting under ``systemd``, a `generator`_ will run that determines if
+``cloud-init.target`` should be included in the boot goals. By default, this
+generator will enable ``cloud-init``. It will not enable ``cloud-init``
+if either:
+
+- The file :file:`/etc/cloud/cloud-init.disabled` exists, or
+- The kernel command line as found in :file:`/proc/cmdline` contains
+ ``cloud-init=disabled``. When running in a container, the kernel command
+ line is not honored, but ``cloud-init`` will read an environment
+ variable named ``KERNEL_CMDLINE`` in its place.
+
+.. note::
+ These mechanisms for disabling ``cloud-init`` at runtime currently only
+ exist in ``systemd``.
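+
+For example, the first mechanism amounts to creating an empty file (a minimal
+illustration):
+
+.. code-block:: shell-session
+
+    $ sudo touch /etc/cloud/cloud-init.disabled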
+
+.. _boot-Local:
+
+Local
+=====
+
++------------------+----------------------------------------------------------+
+| systemd service | ``cloud-init-local.service`` |
++---------+--------+----------------------------------------------------------+
+| runs | as soon as possible with ``/`` mounted read-write |
++---------+--------+----------------------------------------------------------+
+| blocks | as much of boot as possible, *must* block network |
++---------+--------+----------------------------------------------------------+
+| modules | none |
++---------+--------+----------------------------------------------------------+
+
+The purpose of the local stage is to:
+
+ - Locate "local" data sources, and
+ - Apply networking configuration to the system (including "fallback").
+
+In most cases, this stage does not do much more than that. It finds the
+datasource and determines the network configuration to be used. That
+network configuration can come from:
+
+- **datasource**: Cloud-provided network configuration via metadata.
+- **fallback**: ``Cloud-init``'s fallback networking consists of rendering
+ the equivalent to ``dhcp on eth0``, which was historically the most popular
+ mechanism for network configuration of a guest.
+- **none**: Network configuration can be disabled by writing the file
+  :file:`/etc/cloud/cloud.cfg` with the content
+  ``network: {config: disabled}``, as shown in the sketch below.
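+
+A minimal sketch of that ``none`` case, as it could appear in
+:file:`/etc/cloud/cloud.cfg` (or in a drop-in file under
+:file:`/etc/cloud/cloud.cfg.d/`):
+
+.. code-block:: yaml
+
+    network:
+      config: disabled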
+
+If this is an instance's first boot, then the selected network configuration
+is rendered. This includes clearing all previous (stale) configuration,
+including persistent device naming tied to old MAC addresses.
+
+This stage must block network bring-up, and the use of any stale configuration
+that might have already been applied. Otherwise, that could have negative
+effects such as triggering DHCP hooks or broadcasting an old hostname. It
+would also put the system in an odd state to recover from, as it may then
+have to restart network devices.
+
+``Cloud-init`` then exits and expects the continued boot of the operating
+system to bring the network configuration up as configured.
+
+.. note::
+ In the past, local datasources have been only those that were available
+ without network (such as 'ConfigDrive'). However, as seen in the recent
+ additions to the :ref:`DigitalOcean datasource<datasource_digital_ocean>`,
+ even data sources that require a network can operate at this stage.
+
+.. _boot-Network:
+
+Network
+=======
+
++------------------+----------------------------------------------------------+
+| systemd service | ``cloud-init.service`` |
++---------+--------+----------------------------------------------------------+
+| runs | after local stage and configured networking is up |
++---------+--------+----------------------------------------------------------+
+| blocks | as much of remaining boot as possible |
++---------+--------+----------------------------------------------------------+
+| modules | *cloud_init_modules* in ``/etc/cloud/cloud.cfg`` |
++---------+--------+----------------------------------------------------------+
+
+This stage requires all configured networking to be online, as it will fully
+process any user data that is found. Here, processing means it will:
+
+- retrieve any ``#include`` or ``#include-once`` content (recursively),
+  including over HTTP,
+- decompress any compressed content, and
+- run any part-handler found.
+
+This stage runs the ``disk_setup`` and ``mounts`` modules which may partition
+and format disks and configure mount points (such as in :file:`/etc/fstab`).
+Those modules cannot run earlier as they may receive configuration input
+from sources only available via the network. For example, a user may have
+provided user data in a network resource that describes how local mounts
+should be done.
+
+On some clouds, such as Azure, this stage will create filesystems to be
+mounted, including ones that have stale (previous instance) references in
+:file:`/etc/fstab`. As such, entries in :file:`/etc/fstab` other than those
+necessary for ``cloud-init`` to run should not be added until after this stage.
+
+A part-handler will run at this stage, as will boothooks, including
+cloud-config ``bootcmd``. Users of this functionality must be aware
+that the system is still in the process of booting when their code runs.
+
+.. _boot-Config:
+
+Config
+======
+
++------------------+----------------------------------------------------------+
+| systemd service | ``cloud-config.service`` |
++---------+--------+----------------------------------------------------------+
+| runs | after network |
++---------+--------+----------------------------------------------------------+
+| blocks | nothing |
++---------+--------+----------------------------------------------------------+
+| modules | *cloud_config_modules* in ``/etc/cloud/cloud.cfg`` |
++---------+--------+----------------------------------------------------------+
+
+This stage runs config modules only. Modules that do not really have an
+effect on other stages of boot are run here, including ``runcmd``.
+
+.. _boot-Final:
+
+Final
+=====
+
++------------------+----------------------------------------------------------+
+| systemd service | ``cloud-final.service`` |
++---------+--------+----------------------------------------------------------+
+| runs | as final part of boot (traditional "rc.local") |
++---------+--------+----------------------------------------------------------+
+| blocks | nothing |
++---------+--------+----------------------------------------------------------+
+| modules | *cloud_final_modules* in ``/etc/cloud/cloud.cfg`` |
++---------+--------+----------------------------------------------------------+
+
+This stage runs as late in boot as possible. Any scripts that a user is
+accustomed to running after logging into a system should run correctly here.
+Things that run here include:
+
+- package installations,
+- configuration management plugins (Ansible, Puppet, Chef, salt-minion), and
+- user-defined scripts (i.e., shell scripts passed as user data).
+
+External scripts that need to wait until ``cloud-init`` is finished can use
+the :command:`cloud-init status --wait` subcommand, which blocks until
+``cloud-init`` is done, without having to write your own ``systemd`` unit
+dependency chains. See :ref:`cli_status` for more info.
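+
+A minimal illustration of gating an external script on ``cloud-init``
+completion (the output shown is representative):
+
+.. code-block:: shell-session
+
+    $ cloud-init status --wait
+    status: done
+    $ echo "cloud-init finished; safe to continue"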
+
+.. _boot-First_boot_determination:
+
+First boot determination
+========================
+
+``Cloud-init`` has to determine whether or not the current boot is the first
+boot of a new instance, so that it applies the appropriate configuration. On
+an instance's first boot, it should run all "per-instance" configuration,
+whereas on a subsequent boot it should run only "per-boot" configuration. This
+section describes how ``cloud-init`` performs this determination, as well as
+why it is necessary.
+
+When it runs, ``cloud-init`` stores a cache of its internal state for use
+across stages and boots.
+
+If this cache is present, then ``cloud-init`` has run on this system
+before [#not-present]_. There are two cases where this could occur. Most
+commonly, the instance has been rebooted, and this is a second/subsequent
+boot. Alternatively, the filesystem has been attached to a *new* instance,
+and this is the instance's first boot. The most obvious case where this
+happens is when an instance is launched from an image captured from a
+launched instance.
+
+By default, ``cloud-init`` attempts to determine which case it is running
+in by checking the instance ID in the cache against the instance ID it
+determines at runtime. If they do not match, then this is an instance's
+first boot; otherwise, it's a subsequent boot. Internally, ``cloud-init``
+refers to this behaviour as ``check``.
+
+This behaviour is required for images captured from launched instances to
+behave correctly, and so is the default that generic cloud images ship with.
+However, there are cases where it can cause problems [#problems]_. For these
+cases, ``cloud-init`` has support for modifying its behaviour to trust the
+instance ID that is present in the system unconditionally. This means that
+``cloud-init`` will never detect a new instance when the cache is present,
+and it follows that the only way to cause ``cloud-init`` to detect a new
+instance (and therefore its first boot) is to manually remove
+``cloud-init``'s cache. Internally, this behaviour is referred to as
+``trust``.
+
+To configure which of these behaviours to use, ``cloud-init`` exposes the
+``manual_cache_clean`` configuration option. When ``false`` (the default),
+``cloud-init`` will ``check`` the instance ID and clean the cache if the IDs
+do not match. When ``true``, ``cloud-init`` will ``trust`` the existing cache
+(and therefore not clean it).
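+
+For example, to opt into the ``trust`` behaviour, the following could be
+provided in cloud-config or in a drop-in file (the filename below is only an
+illustration):
+
+.. code-block:: yaml
+
+    # e.g. /etc/cloud/cloud.cfg.d/99-manual-cache-clean.cfg (example filename)
+    manual_cache_clean: true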
+
+Manual cache cleaning
+=====================
+
+``Cloud-init`` ships a command for manually cleaning the cache:
+:command:`cloud-init clean`. See :ref:`cli_clean`'s documentation for further
+details.
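+
+For example, before capturing an image from an instance running in ``trust``
+mode:
+
+.. code-block:: shell-session
+
+    $ sudo cloud-init clean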
+
+Reverting ``manual_cache_clean`` setting
+----------------------------------------
+
+Currently there is no support for switching an instance that is launched with
+``manual_cache_clean: true`` from ``trust`` behaviour to ``check`` behaviour,
+other than manually cleaning the cache.
+
+.. warning:: If you want to capture an instance that is currently in ``trust``
+ mode as an image for launching other instances, you **must** manually clean
+ the cache. If you do not do so, then instances launched from the captured
+ image will all detect their first boot as a subsequent boot of the captured
+ instance, and will not apply any per-instance configuration.
+
+ This is a functional issue, but also a potential security one:
+ ``cloud-init`` is responsible for rotating SSH host keys on first boot,
+ and this will not happen on these instances.
+
+.. [#not-present] It follows that if this cache is not present,
+ ``cloud-init`` has not run on this system before, so this is
+ unambiguously this instance's first boot.
+
+.. [#problems] A couple of ways in which this strict reliance on the presence
+ of a datasource has been observed to cause problems:
+
+ - If a cloud's metadata service is flaky and ``cloud-init`` cannot
+ obtain the instance ID locally on that platform, ``cloud-init``'s
+ instance ID determination will sometimes fail to determine the current
+ instance ID, which makes it impossible to determine if this is an
+ instance's first or subsequent boot (`#1885527`_).
+ - If ``cloud-init`` is used to provision a physical appliance or device
+ and an attacker can present a datasource to the device with a different
+ instance ID, then ``cloud-init``'s default behaviour will detect this as
+ an instance's first boot and reset the device using the attacker's
+ configuration (this has been observed with the
+ :ref:`NoCloud datasource<datasource_nocloud>` in `#1879530`_).
+
+.. _generator: https://www.freedesktop.org/software/systemd/man/systemd.generator.html
+.. _#1885527: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1885527
+.. _#1879530: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1879530
diff --git a/doc/rtd/explanation/configuration.rst b/doc/rtd/explanation/configuration.rst
new file mode 100644
index 00000000..456ded97
--- /dev/null
+++ b/doc/rtd/explanation/configuration.rst
@@ -0,0 +1,81 @@
+.. _configuration:
+
+Configuration sources
+*********************
+
+Internally, ``cloud-init`` builds a single configuration that is then
+referenced throughout the life of ``cloud-init``. The configuration is built
+from multiple sources such that if a key is defined in multiple sources, the
+higher priority source overwrites the lower priority source.
+
+Base configuration
+==================
+
+From lowest priority to highest, configuration sources are:
+
+- **Hardcoded config**: Config_ that lives within the source of ``cloud-init``
+ and cannot be changed.
+- **Configuration directory**: Anything defined in :file:`/etc/cloud/cloud.cfg`
+ and :file:`/etc/cloud/cloud.cfg.d`.
+- **Runtime config**: Anything defined in :file:`/run/cloud-init/cloud.cfg`.
+- **Kernel command line**: On the kernel command line, anything found between
+ ``cc:`` and ``end_cc`` will be interpreted as cloud-config user data.
+
+These four sources make up the base configuration.
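+
+As an illustration of the last source, kernel command line content such as
+the following would be merged in as cloud-config (the ``ssh_import_id`` value
+is only an example):
+
+.. code-block:: text
+
+    root=/dev/sda1 ro cc: ssh_import_id: [exampleuser] end_cc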
+
+Vendor and user data
+====================
+
+Added to the base configuration are :ref:`vendor data<vendordata>` and
+:ref:`user data<user_data_formats>` which are both provided by the datasource.
+
+These get fetched from the datasource and are defined at instance launch.
+
+.. note::
+ While much of what is defined in the base configuration can be overridden by
+ vendor data and user data, base configuration sources do not conform to
+ :ref:`#cloud-config<user_data_formats>`.
+
+Network configuration
+=====================
+
+Network configuration happens independently from other ``cloud-init``
+configuration. See :ref:`network configuration documentation<network_config>`
+for more information.
+
+Specifying configuration
+==========================
+
+End users
+---------
+
+Pass :ref:`user data<user_data_formats>` to the cloud provider.
+Every platform supporting ``cloud-init`` will provide a method of supplying
+user data. If you're unsure how to do this, reference the documentation
+provided by the cloud platform you're on. Additionally, there may be
+related ``cloud-init`` documentation in the :ref:`datasource<datasources>`
+section.
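+
+As one concrete sketch, a platform such as LXD accepts user data at launch
+time (the image, instance name and file name here are assumptions):
+
+.. code-block:: shell-session
+
+    $ lxc launch ubuntu:jammy my-instance --config=user.user-data="$(cat user-data.yaml)"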
+
+Once an instance has been initialised, the user data may not be edited.
+It is sourced directly from the cloud, so even if you find a local file
+that contains user data, it will likely be overwritten on the next boot.
+
+Distro providers
+----------------
+
+Modify the base config. This often involves submitting a PR to modify
+the base `cloud.cfg template`_, which is used to customise
+:file:`/etc/cloud/cloud.cfg` per distro. Additionally, a file can be added to
+:file:`/etc/cloud/cloud.cfg.d` to override a piece of the base configuration.
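+
+For example, a drop-in file could override part of the base configuration,
+such as the datasource list (the filename and value are illustrative):
+
+.. code-block:: yaml
+
+    # e.g. /etc/cloud/cloud.cfg.d/90-datasources.cfg (example filename)
+    datasource_list: [ NoCloud, None ]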
+
+Cloud providers
+---------------
+
+Pass vendor data. This is the preferred method for clouds to provide
+their own customisation. In some cases, it may make sense to modify the
+base config in the same manner as distro providers on cloud-supported
+images.
+
+
+.. _Config: https://github.com/canonical/cloud-init/blob/b861ea8a5e1fd0eb33096f60f54eeff42d80d3bd/cloudinit/settings.py#L22
+.. _cloud.cfg template: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl
diff --git a/doc/rtd/explanation/events.rst b/doc/rtd/explanation/events.rst
new file mode 100644
index 00000000..0767d025
--- /dev/null
+++ b/doc/rtd/explanation/events.rst
@@ -0,0 +1,95 @@
+.. _events:
+
+Events and updates
+******************
+
+Events
+======
+
+``Cloud-init`` will fetch and apply cloud and user data configuration
+upon several event types. The two most common events for ``cloud-init``
+are when an instance first boots and any subsequent boot thereafter (reboot).
+In addition to boot events, ``cloud-init`` users and vendors are interested
+in when devices are added. ``Cloud-init`` currently supports the following
+event types:
+
+- ``BOOT_NEW_INSTANCE``: New instance first boot.
+- ``BOOT``: Any system boot other than ``BOOT_NEW_INSTANCE``.
+- ``BOOT_LEGACY``: Similar to ``BOOT``, but applies networking config twice
+ each boot: once during the :ref:`Local stage<boot-Local>`, then again in the
+  :ref:`Network stage<boot-Network>`. As this was previously the default
+  behaviour, this option exists to avoid regressions for setups that rely
+  on it.
+- ``HOTPLUG``: Dynamic add of a system device.
+
+Future work will likely include infrastructure and support for the following
+events:
+
+- ``METADATA_CHANGE``: An instance's metadata has changed.
+- ``USER_REQUEST``: Directed request to update.
+
+Datasource event support
+========================
+
+All :ref:`datasources<datasources>` support the ``BOOT_NEW_INSTANCE`` event
+by default. Each datasource will declare a set of these events that it is
+capable of handling. Datasources may not support all event types. In some
+cases a system may be configured to allow a particular event but may be
+running on a platform whose datasource cannot support the event.
+
+Configuring event updates
+=========================
+
+Update configuration may be specified via user data, which can be used to
+enable or disable handling of specific events. This configuration will be
+honored as long as the events are supported by the datasource. However,
+configuration will always be applied at first boot, regardless of the user
+data specified.
+
+Updates
+-------
+
+Update policy configuration defines which events are allowed to be handled.
+This is separate from whether a particular platform or datasource has the
+capability for such events.
+
+``scope``: *<name of the* ``scope`` *for event policy>*
+ The ``scope`` value is a string which defines which domain the event occurs
+ under. Currently, the only known ``scope`` is ``network``, though more
+ ``scopes`` may be added in the future. ``Scopes`` are defined by convention
+ but arbitrary values can be used.
+
+``when``: *<list of events to handle for a particular* ``scope`` *>*
+ Each ``scope`` requires a ``when`` element to specify which events
+  are allowed to be handled.
+
+Hotplug
+=======
+
+When the ``hotplug`` event is supported by the datasource and configured in
+user data, ``cloud-init`` will respond to the addition or removal of network
+interfaces to the system. In addition to fetching and updating the system
+metadata, ``cloud-init`` will also bring up/down the newly added interface.
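+
+For example, user data such as the following would allow ``hotplug`` handling
+of network interfaces in addition to the usual first-boot behaviour (a sketch;
+the datasource must also support the event):
+
+.. code-block:: yaml
+
+    #cloud-config
+    updates:
+      network:
+        when: ['boot-new-instance', 'hotplug']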
+
+.. warning::
+ Due to its use of ``systemd`` sockets, ``hotplug`` functionality is
+ currently incompatible with SELinux. This issue is being `tracked
+ on Launchpad`_. Additionally, ``hotplug`` support is considered
+ experimental for non-Debian-based systems.
+
+Example
+=======
+
+Apply network config every boot
+-------------------------------
+
+On every boot, apply network configuration found in the datasource.
+
+.. code-block:: yaml
+
+ # apply network config on every boot
+ updates:
+ network:
+ when: ['boot']
+
+.. _Cloud-init: https://launchpad.net/cloud-init
+.. _tracked on Launchpad: https://bugs.launchpad.net/cloud-init/+bug/1936229
diff --git a/doc/rtd/explanation/format.rst b/doc/rtd/explanation/format.rst
new file mode 100644
index 00000000..1e9c9307
--- /dev/null
+++ b/doc/rtd/explanation/format.rst
@@ -0,0 +1,224 @@
+.. _user_data_formats:
+
+User data formats
+*****************
+
+User data that will be acted upon by ``cloud-init`` must be in one of the
+following types.
+
+Cloud config data
+=================
+
+Cloud-config is the simplest way to accomplish some things via user data.
+Using cloud-config syntax, the user can specify certain things in a
+human-friendly format.
+
+These things include:
+
+- ``apt upgrade`` should be run on first boot
+- a different ``apt`` mirror should be used
+- additional ``apt`` sources should be added
+- certain SSH keys should be imported
+- *and many more...*
+
+.. note::
+ This file must be valid YAML syntax.
+
+See the :ref:`yaml_examples` section for a commented set of examples of
+supported cloud config formats.
+
+Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when
+using a MIME archive.
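+
+A minimal example (the package name is only an illustration):
+
+.. code-block:: yaml
+
+    #cloud-config
+    package_update: true
+    packages:
+      - htop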
+
+.. note::
+ New in ``cloud-init`` v. 18.4: Cloud config data can also render cloud
+ instance metadata variables using jinja templating. See
+ :ref:`instance_metadata` for more information.
+
+.. _user_data_script:
+
+User data script
+================
+
+Typically used by those who just want to execute a shell script.
+
+Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME
+archive.
+
+User data scripts can optionally render cloud instance metadata variables using
+jinja templating. See :ref:`instance_metadata` for more information.
+
+Example script
+--------------
+
+Create a script file :file:`myscript.sh` that contains the following:
+
+.. code-block:: shell
+
+ #!/bin/sh
+ echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt
+
+Now run:
+
+.. code-block:: shell-session
+
+ $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9
+
+Kernel command line
+===================
+
+When using the NoCloud datasource, users can pass user data via the kernel
+command line parameters. See the :ref:`NoCloud datasource<datasource_nocloud>`
+and :ref:`kernel_cmdline` documentation for more details.
+
+Gzip compressed content
+=======================
+
+Content found to be gzip compressed will be uncompressed.
+The uncompressed data will then be used as if it were not compressed.
+This is typically useful because user data is limited to ~16384 [#]_ bytes.
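+
+For example, cloud-config user data could be compressed before being supplied
+(a sketch; the file names are illustrative):
+
+.. code-block:: shell-session
+
+    $ gzip -9 -c user-data.yaml > user-data.yaml.gz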
+
+MIME multi-part archive
+=======================
+
+This list of rules is applied to each part of this multi-part file.
+Using a MIME multi-part file, the user can specify more than one type of data.
+
+For example, both a user data script and a cloud-config type could be
+specified.
+
+Supported content-types are listed from the ``cloud-init`` subcommand
+:command:`make-mime`:
+
+.. code-block:: shell-session
+
+ $ cloud-init devel make-mime --list-types
+
+Example output:
+
+.. code-block::
+
+ cloud-boothook
+ cloud-config
+ cloud-config-archive
+ cloud-config-jsonp
+ jinja2
+ part-handler
+ x-include-once-url
+ x-include-url
+ x-shellscript
+ x-shellscript-per-boot
+ x-shellscript-per-instance
+ x-shellscript-per-once
+
+Helper subcommand to generate MIME messages
+-------------------------------------------
+
+The ``cloud-init`` `make-mime`_ subcommand can also generate MIME multi-part
+files.
+
+The :command:`make-mime` subcommand takes pairs of (filename, "text/" mime
+subtype) separated by a colon (e.g., ``config.yaml:cloud-config``) and emits a
+MIME multipart message to :file:`stdout`.
+
+Examples
+--------
+
+Create user data containing both a cloud-config (:file:`config.yaml`)
+and a shell script (:file:`script.sh`):
+
+.. code-block:: shell-session
+
+ $ cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > userdata
+
+Create user data containing 3 shell scripts:
+
+- :file:`always.sh` - run every boot
+- :file:`instance.sh` - run once per instance
+- :file:`once.sh` - run once
+
+.. code-block:: shell-session
+
+ $ cloud-init devel make-mime -a always.sh:x-shellscript-per-boot -a instance.sh:x-shellscript-per-instance -a once.sh:x-shellscript-per-once
+
+``include`` file
+================
+
+This content is an :file:`include` file.
+
+The file contains a list of URLs, one per line. Each URL will be read
+and its content will be passed through this same set of rules, i.e., the
+content read from the URL can be gzipped, MIME multi-part, or plain text. If
+an error occurs while reading a file, the remaining files will not be read.
+
+Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using
+a MIME archive.
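+
+An example ``include`` file (the URLs are placeholders):
+
+.. code-block:: text
+
+    #include
+    https://example.com/common-config.yaml
+    https://example.com/finish-setup.sh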
+
+``cloud-boothook``
+==================
+
+This content is `boothook` data. It is stored in a file under
+:file:`/var/lib/cloud` and executed immediately. This is the earliest `hook`
+available. Note that there is no mechanism provided for running it only once;
+the `boothook` must take care of this itself.
+
+It is provided with the instance ID in the environment variable
+``INSTANCE_ID``. This can be used to provide 'once-per-instance'
+functionality.
+
+Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when
+using a MIME archive.
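+
+A sketch of a boothook that uses ``INSTANCE_ID`` to run only once per
+instance (the marker and log paths are illustrative):
+
+.. code-block:: bash
+
+    #cloud-boothook
+    #!/bin/sh
+    # Runs on every boot; use INSTANCE_ID to emulate once-per-instance.
+    marker="/var/lib/my-boothook.done.${INSTANCE_ID}"
+    [ -e "$marker" ] && exit 0
+    echo "boothook first run for instance ${INSTANCE_ID}" >> /var/log/my-boothook.log
+    touch "$marker"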
+
+Part-handler
+============
+
+This is a `part-handler`: It contains custom code for either supporting new
+mime-types in multi-part user data, or overriding the existing handlers for
+supported mime-types. It will be written to a file in
+:file:`/var/lib/cloud/data` based on its filename (which is generated).
+
+This must be Python code that contains a ``list_types`` function and a
+``handle_part`` function. Once the section is read, the ``list_types`` method
+will be called. It must return a list of mime-types that this `part-handler`
+handles. Since MIME parts are processed in order, a `part-handler` part
+must precede any parts with mime-types it is expected to handle in the same
+user data.
+
+The ``handle_part`` function must be defined like:
+
+.. code-block:: python
+
+ def handle_part(data, ctype, filename, payload):
+ # data = the cloudinit object
+ # ctype = "__begin__", "__end__", or the mime-type of the part that is being handled.
+ # filename = the filename of the part (or a generated filename if none is present in mime data)
+ # payload = the parts' content
+
+``Cloud-init`` will then call the ``handle_part`` function once before it
+handles any parts, once per part received, and once after all parts have been
+handled. The ``'__begin__'`` and ``'__end__'`` sentinels allow the part
+handler to do initialisation or teardown before or after receiving any parts.
+
+Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when
+using a MIME archive.
+
+Example
+-------
+
+.. literalinclude:: ../../examples/part-handler.txt
+ :language: python
+ :linenos:
+
+Also, `this blog post`_ offers another example for more advanced usage.
+
+Disabling user data
+===================
+
+``Cloud-init`` can be configured to ignore any user data provided to the
+instance. This allows custom images to prevent users from accidentally
+breaking closed appliances. Setting ``allow_userdata: false`` in the
+configuration prevents ``cloud-init`` from processing user data.
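+
+For example, an image could ship a drop-in configuration file such as the
+following (the filename is illustrative):
+
+.. code-block:: yaml
+
+    # e.g. /etc/cloud/cloud.cfg.d/99-disable-userdata.cfg (example filename)
+    allow_userdata: false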
+
+.. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
+.. [#] See your cloud provider for applicable user-data size limitations.
+.. _this blog post: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
diff --git a/doc/rtd/explanation/index.rst b/doc/rtd/explanation/index.rst
new file mode 100644
index 00000000..c6114096
--- /dev/null
+++ b/doc/rtd/explanation/index.rst
@@ -0,0 +1,20 @@
+Explanation
+***********
+
+Our explanatory and conceptual guides are written to provide a better
+understanding of how ``cloud-init`` works. They enable you to expand your
+knowledge and become better at using and configuring ``cloud-init``.
+
+-----
+
+.. toctree::
+ :maxdepth: 1
+
+ configuration.rst
+ boot.rst
+ format.rst
+ events.rst
+ instancedata.rst
+ vendordata.rst
+ security.rst
+ analyze.rst
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/explanation/instancedata.rst
index 575dbf1b..8f5b310a 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/explanation/instancedata.rst
@@ -1,114 +1,111 @@
.. _instance_metadata:
-*****************
-Instance Metadata
+
+Instance metadata
*****************
.. toctree::
- :maxdepth: 1
- :hidden:
+ :maxdepth: 1
+ :hidden:
- kernel-cmdline.rst
+ kernel-cmdline.rst
-What is instance-data?
-========================
+What is ``instance-data``?
+==========================
-Each cloud provider presents unique
-configuration metadata to a launched cloud instance. Cloud-init
-crawls this metadata and then caches and exposes this information
-as a standarized and versioned JSON object known as instance-data.
-This instance-data may then be
-queried or later used by cloud-init in templated configuration and scripts.
+Each cloud provider presents unique configuration metadata to a launched cloud
+instance. ``Cloud-init`` crawls this metadata and then caches and exposes this
+information as a standardised and versioned JSON object known as
+``instance-data``. This ``instance-data`` may then be queried or later used
+by ``cloud-init`` in templated configuration and scripts.
An example of a small subset of instance-data on a launched EC2 instance:
.. code-block:: json
- {
- "v1": {
- "cloud_name": "aws",
- "distro": "ubuntu",
- "distro_release": "jammy",
- "distro_version": "22.04",
- "instance_id": "i-06b5687b4d7b8595d",
- "machine": "x86_64",
- "platform": "ec2",
- "python_version": "3.10.4",
- "region": "us-east-2",
- "variant": "ubuntu"
- }
- }
+ {
+ "v1": {
+ "cloud_name": "aws",
+ "distro": "ubuntu",
+ "distro_release": "jammy",
+ "distro_version": "22.04",
+ "instance_id": "i-06b5687b4d7b8595d",
+ "machine": "x86_64",
+ "platform": "ec2",
+ "python_version": "3.10.4",
+ "region": "us-east-2",
+ "variant": "ubuntu"
+ }
+ }
Discovery
=========
-One way to easily explore what instance-data variables are available on
+One way to easily explore which ``instance-data`` variables are available on
your machine is to use the :ref:`cloud-init query<cli_query>` tool.
-Warnings or exceptions will be raised on invalid instance-data keys,
+Warnings or exceptions will be raised on invalid ``instance-data`` keys,
paths or invalid syntax.
-The **query** command also publishes ``userdata`` and ``vendordata`` keys to
-the root user which will contain the decoded user and vendor data provided to
-this instance. Non-root users referencing userdata or vendordata keys will
-see only redacted values.
+The :command:`query` command also publishes ``userdata`` and ``vendordata``
+keys to the root user which will contain the decoded user and vendor data
+provided to this instance. Non-root users referencing ``userdata`` or
+``vendordata`` keys will see only redacted values.
.. note::
- To save time designing a user-data template for a specific cloud's
- instance-data.json, use the 'render' cloud-init command on an
- instance booted on your favorite cloud. See :ref:`cli_devel` for more
- information.
+ To save time designing a user data template for a specific cloud's
+ :file:`instance-data.json`, use the :command:`render` command on an
+ instance booted on your favorite cloud. See :ref:`cli_devel` for more
+ information.
+
+.. _instancedata-Using:
-Using instance-data
-===================
+Using ``instance-data``
+=======================
-Instance-data can be used in:
+``instance-data`` can be used in:
-* :ref:`User-data scripts<topics/format:User-Data Script>`
-* :ref:`Cloud-config data<topics/format:Cloud Config Data>`
-* :ref:`Base configuration<configuration>`
-* Command line interface via **cloud-init query** or
- **cloud-init devel render**
+* :ref:`User data scripts<user_data_script>`.
+* :ref:`Cloud-config data<user_data_formats>`.
+* :ref:`Base configuration<configuration>`.
+* Command line interface via :command:`cloud-init query` or
+ :command:`cloud-init devel render`.
The aforementioned configuration sources support jinja template rendering.
When the first line of the provided configuration begins with
-**## template: jinja**, cloud-init will use jinja to render that file.
-Any instance-data variables are surfaced as jinja template
-variables.
+**## template: jinja**, ``cloud-init`` will use jinja to render that file.
+Any ``instance-data`` variables are surfaced as jinja template variables.
.. note::
- Trying to reference jinja variables that don't exist in instance-data
- will result in warnings in ``/var/log/cloud-init.log`` and the following
- string in your rendered user-data:
- ``CI_MISSING_JINJA_VAR/<your_varname>``.
+ Trying to reference jinja variables that don't exist in ``instance-data``
+ will result in warnings in :file:`/var/log/cloud-init.log` and the following
+ string in your rendered :file:`user-data`:
+ ``CI_MISSING_JINJA_VAR/<your_varname>``.
-Sensitive data such as user passwords may be contained in
-instance-data. Cloud-init separates this sensitive data such that
-is it only readable by root. In the case that a non-root user attempts
-to read sensitive instance-data, they will receive redacted data or same
-warnings and text that occur if a variable does not exist.
+Sensitive data such as user passwords may be contained in ``instance-data``.
+``Cloud-init`` separates this sensitive data such that it is only readable by
+root. If a non-root user attempts to read sensitive ``instance-data``, they
+will receive redacted data or the same warnings and text that occur if a
+variable does not exist.
-Example Usage
--------------
-
-Cloud config with instance-data
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Example: Cloud config with ``instance-data``
+--------------------------------------------
.. code-block:: yaml
- ## template: jinja
- #cloud-config
- runcmd:
- - echo 'EC2 public hostname allocated to instance: {{
- ds.meta_data.public_hostname }}' > /tmp/instance_metadata
- - echo 'EC2 availability zone: {{ v1.availability_zone }}' >>
- /tmp/instance_metadata
- - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}",
- "availability-zone": "{{ v1.availability_zone }}"}'
- https://example.com
-
-User-data script with instance-data
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ ## template: jinja
+ #cloud-config
+ runcmd:
+ - echo 'EC2 public hostname allocated to instance: {{
+ ds.meta_data.public_hostname }}' > /tmp/instance_metadata
+ - echo 'EC2 availability zone: {{ v1.availability_zone }}' >>
+ /tmp/instance_metadata
+ - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}",
+ "availability-zone": "{{ v1.availability_zone }}"}'
+ https://example.com
+
+Example: User data script with ``instance-data``
+------------------------------------------------
.. code-block:: jinja
@@ -120,245 +117,275 @@ User-data script with instance-data
{%- endif %}
...
-CLI discovery of instance-data
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Example: CLI discovery of ``instance-data``
+-------------------------------------------
.. code-block:: shell-session
- # List all instance-data keys and values as root user
- % sudo cloud-init query --all
- {...}
-
- # List all top-level instance-data keys available
- % cloud-init query --list-keys
-
- # Introspect nested keys on an object
- % cloud-init query -f "{{ds.keys()}}"
- dict_keys(['meta_data', '_doc'])
-
- # Failure to reference valid dot-delimited key path on a known top-level key
- % cloud-init query v1.not_here
- ERROR: instance-data 'v1' has no 'not_here'
-
- # Test expected value using valid instance-data key path
- % cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}"
- My AMI: ami-0fecc35d3c8ba8d60
-
- # The --format command renders jinja templates, this can also be used
- # to develop and test jinja template constructs
- % cat > test-templating.yaml <<EOF
- {% for val in ds.meta_data.keys() %}
- - {{ val }}
- {% endfor %}
- EOF
- % cloud-init query --format="$( cat test-templating.yaml )"
- - instance_id
- - dsmode
- - local_hostname
+ # List all instance-data keys and values as root user
+ $ sudo cloud-init query --all
+ {...}
+
+ # List all top-level instance-data keys available
+ $ cloud-init query --list-keys
+
+ # Introspect nested keys on an object
+ $ cloud-init query -f "{{ds.keys()}}"
+ dict_keys(['meta_data', '_doc'])
+
+ # Failure to reference valid dot-delimited key path on a known top-level key
+ $ cloud-init query v1.not_here
+ ERROR: instance-data 'v1' has no 'not_here'
+
+ # Test expected value using valid instance-data key path
+ $ cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}"
+ My AMI: ami-0fecc35d3c8ba8d60
+
+ # The --format command renders jinja templates, this can also be used
+ # to develop and test jinja template constructs
+ $ cat > test-templating.yaml <<EOF
+ {% for val in ds.meta_data.keys() %}
+ - {{ val }}
+ {% endfor %}
+ EOF
+ $ cloud-init query --format="$( cat test-templating.yaml )"
+ - instance_id
+ - dsmode
+ - local_hostname
Reference
=========
-Storage Locations
+Storage locations
-----------------
-* ``/run/cloud-init/instance-data.json``: world-readable json containing
- standardized keys, sensitive keys redacted
-* ``/run/cloud-init/instance-data-sensitive.json``: root-readable unredacted
- json blob
-
-Format of instance-data.json
-----------------------------
-
-Top-level keys:
-
-* **base64_encoded_keys**: A list of forward-slash delimited key paths into
- the instance-data.json object whose value is base64encoded for json
- compatibility. Values at these paths should be decoded to get the original
- value.
-
-* **sensitive_keys**: A list of forward-slash delimited key paths into
- the instance-data.json object whose value is considered by the datasource as
- 'security sensitive'. Only the keys listed here will be redacted from
- instance-data.json for non-root users.
-
-* **merged_cfg**: Merged cloud-init :ref:`base_config_reference` from
- `/etc/cloud/cloud.cfg` and `/etc/cloud/cloud-cfg.d`. Values under this key
- could contain sensitive information such as passwords, so it is included in
- the **sensitive-keys** list which is only readable by root.
-
-* **ds**: Datasource-specific metadata crawled for the specific cloud
- platform. It should closely represent the structure of the cloud metadata
- crawled. The structure of content and details provided are entirely
- cloud-dependent. Mileage will vary depending on what the cloud exposes.
- The content exposed under the 'ds' key is currently **experimental** and
- expected to change slightly in the upcoming cloud-init release.
-
-* **sys_info**: Information about the underlying os, python, architecture and
- kernel. This represents the data collected by `cloudinit.util.system_info`.
-
-* **v1**: Standardized cloud-init metadata keys, these keys are guaranteed to
- exist on all cloud platforms. They will also retain their current behavior
- and format and will be carried forward even if cloud-init introduces a new
- version of standardized keys with **v2**.
-
-To cut down on keystrokes on the command line, cloud-init also provides
-top-level key aliases for any standardized ``v#`` keys present. The preceding
+
+* :file:`/run/cloud-init/instance-data.json`: world-readable JSON containing
+ standardised keys, sensitive keys redacted.
+* :file:`/run/cloud-init/instance-data-sensitive.json`: root-readable
+ unredacted JSON blob.
+
+:file:`instance-data.json` top level keys
+-----------------------------------------
+
+``base64_encoded_keys``
+^^^^^^^^^^^^^^^^^^^^^^^
+
+A list of forward-slash delimited key paths into the :file:`instance-data.json`
+object whose value is base64encoded for JSON compatibility. Values at these
+paths should be decoded to get the original value.
+
+``sensitive_keys``
+^^^^^^^^^^^^^^^^^^
+
+A list of forward-slash delimited key paths into the :file:`instance-data.json`
+object whose value is considered by the datasource as 'security sensitive'.
+Only the keys listed here will be redacted from :file:`instance-data.json` for
+non-root users.
+
+``merged_cfg``
+^^^^^^^^^^^^^^
+
+Merged ``cloud-init`` :ref:`base_config_reference` from
+:file:`/etc/cloud/cloud.cfg` and :file:`/etc/cloud/cloud-cfg.d`. Values under
+this key could contain sensitive information such as passwords, so it is
+included in the ``sensitive-keys`` list which is only readable by root.
+
+``ds``
+^^^^^^
+
+Datasource-specific metadata crawled for the specific cloud platform. It should
+closely represent the structure of the cloud metadata crawled. The structure of
+content and details provided are entirely cloud-dependent. Mileage will vary
+depending on what the cloud exposes. The content exposed under the ``ds`` key
+is currently **experimental** and expected to change slightly in the upcoming
+``cloud-init`` release.
+
+``sys_info``
+^^^^^^^^^^^^
+
+Information about the underlying OS, Python, architecture and kernel. This
+represents the data collected by ``cloudinit.util.system_info``.
+
+``v1``
+^^^^^^
+
+Standardised ``cloud-init`` metadata keys, these keys are guaranteed to exist
+on all cloud platforms. They will also retain their current behaviour and
+format, and will be carried forward even if ``cloud-init`` introduces a new
+version of standardised keys with ``v2``.
+
+To cut down on keystrokes on the command line, ``cloud-init`` also provides
+top-level key aliases for any standardised ``v#`` keys present. The preceding
``v1`` is not required of ``v1.var_name`` These aliases will represent the
value of the highest versioned standard key. For example, ``cloud_name``
value will be ``v2.cloud_name`` if both ``v1`` and ``v2`` keys are present in
-instance-data.json.
+:file:`instance-data.json`.
-cloud-init also provides jinja-safe key aliases for any instance-data
-keys which contain jinja operator characters such as +, -, ., /, etc. Any
-jinja operator will be replaced with underscores in the jinja-safe key
-alias. This allows for cloud-init templates to use aliased variable
-references which allow for jinja's dot-notation reference such as
+``Cloud-init`` also provides jinja-safe key aliases for any ``instance-data``
+keys which contain jinja operator characters such as ``+``, ``-``, ``.``,
+``/``, etc. Any jinja operator will be replaced with underscores in the
+jinja-safe key alias. This allows for ``cloud-init`` templates to use aliased
+variable references which allow for jinja's dot-notation reference such as
``{{ ds.v1_0.my_safe_key }}`` instead of ``{{ ds["v1.0"]["my/safe-key"] }}``.
-The standardized keys present:
+Standardised :file:`instance-data.json` v1 keys
+-----------------------------------------------
+
+``v1._beta_keys``
+^^^^^^^^^^^^^^^^^
-v1._beta_keys
--------------
-List of standardized keys still in 'beta'. The format, intent or presence of
+List of standardised keys still in 'beta'. The format, intent or presence of
these keys can change. Do not consider them production-ready.
Example output:
-- [subplatform]
+ - [subplatform]
+
+``v1.cloud_name``
+^^^^^^^^^^^^^^^^^
-v1.cloud_name
--------------
Where possible this will indicate the 'name' of the cloud the system is running
on. This is different than the 'platform' item. For example, the cloud name of
Amazon Web Services is 'aws', while the platform is 'ec2'.
-If determining a specific name is not possible or provided in meta-data, then
-this filed may contain the same content as 'platform'.
+If determining a specific name is not possible or provided in
+:file:`meta-data`, then this field may contain the same content as 'platform'.
Example output:
-- aws
-- openstack
-- azure
-- configdrive
-- nocloud
-- ovf
+ - aws
+ - openstack
+ - azure
+ - configdrive
+ - nocloud
+ - ovf
+
+``v1.distro``, ``v1.distro_version``, ``v1.distro_release``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-v1.distro, v1.distro_version, v1.distro_release
------------------------------------------------
This shall be the distro name, version and release as determined by
-`cloudinit.util.get_linux_distro`.
+``cloudinit.util.get_linux_distro``.
Example output:
-- alpine, 3.12.0, ''
-- centos, 7.5, core
-- debian, 9, stretch
-- freebsd, 12.0-release-p10,
-- opensuse, 42.3, x86_64
-- opensuse-tumbleweed, 20180920, x86_64
-- redhat, 7.5, 'maipo'
-- sles, 12.3, x86_64
-- ubuntu, 20.04, focal
-
-v1.instance_id
---------------
-Unique instance_id allocated by the cloud.
+ - alpine, 3.12.0, ''
+ - centos, 7.5, core
+ - debian, 9, stretch
+ - freebsd, 12.0-release-p10,
+ - opensuse, 42.3, x86_64
+ - opensuse-tumbleweed, 20180920, x86_64
+ - redhat, 7.5, 'maipo'
+ - sles, 12.3, x86_64
+ - ubuntu, 20.04, focal
-Examples output:
+``v1.instance_id``
+^^^^^^^^^^^^^^^^^^
-- i-<hash>
+Unique ``instance_id`` allocated by the cloud.
-v1.kernel_release
------------------
-This shall be the running kernel `uname -r`
+Example output:
+
+ - i-<hash>
+
+``v1.kernel_release``
+^^^^^^^^^^^^^^^^^^^^^
+
+This shall be the running kernel ``uname -r``.
Example output:
-- 5.3.0-1010-aws
+ - 5.3.0-1010-aws
+
+``v1.local_hostname``
+^^^^^^^^^^^^^^^^^^^^^
-v1.local_hostname
------------------
The internal or local hostname of the system.
-Examples output:
+Example output:
+
+ - ip-10-41-41-70
+ - <user-provided-hostname>
-- ip-10-41-41-70
-- <user-provided-hostname>
+``v1.machine``
+^^^^^^^^^^^^^^
-v1.machine
-----------
-This shall be the running cpu machine architecture `uname -m`
+This shall be the running cpu machine architecture ``uname -m``.
Example output:
-- x86_64
-- i686
-- ppc64le
-- s390x
+ - x86_64
+ - i686
+ - ppc64le
+ - s390x
+
+``v1.platform``
+^^^^^^^^^^^^^^^
-v1.platform
--------------
An attempt to identify the cloud platform instance that the system is running
on.
-Examples output:
+Example output:
+
+ - ec2
+ - openstack
+ - lxd
+ - gce
+ - nocloud
+ - ovf
-- ec2
-- openstack
-- lxd
-- gce
-- nocloud
-- ovf
+``v1.subplatform``
+^^^^^^^^^^^^^^^^^^
-v1.subplatform
---------------
Additional platform details describing the specific source or type of metadata
used. The format of subplatform will be:
``<subplatform_type> (<url_file_or_dev_path>)``
-Examples output:
+Example output:
+
+ - metadata (http://169.254.169.254)
+ - seed-dir (/path/to/seed-dir/)
+ - config-disk (/dev/cd0)
+ - configdrive (/dev/sr0)
-- metadata (http://168.254.169.254)
-- seed-dir (/path/to/seed-dir/)
-- config-disk (/dev/cd0)
-- configdrive (/dev/sr0)
+``v1.public_ssh_keys``
+^^^^^^^^^^^^^^^^^^^^^^
-v1.public_ssh_keys
-------------------
A list of SSH keys provided to the instance by the datasource metadata.
-Examples output:
+Example output:
-- ['ssh-rsa AA...', ...]
+ - ['ssh-rsa AA...', ...]
-v1.python_version
------------------
-The version of python that is running cloud-init as determined by
-`cloudinit.util.system_info`
+``v1.python_version``
+^^^^^^^^^^^^^^^^^^^^^
+
+The version of Python that is running ``cloud-init`` as determined by
+``cloudinit.util.system_info``.
Example output:
-- 3.7.6
+ - 3.7.6
+
+``v1.region``
+^^^^^^^^^^^^^
-v1.region
----------
-The physical region/data center in which the instance is deployed.
+The physical region/data centre in which the instance is deployed.
-Examples output:
+Example output:
+
+ - us-east-2
-- us-east-2
+``v1.availability_zone``
+^^^^^^^^^^^^^^^^^^^^^^^^
-v1.availability_zone
---------------------
The physical availability zone in which the instance is deployed.
-Examples output:
+Example output:
-- us-east-2b
-- nova
-- null
+ - us-east-2b
+ - nova
+ - null
Example Output
--------------
diff --git a/doc/rtd/explanation/kernel-cmdline.rst b/doc/rtd/explanation/kernel-cmdline.rst
new file mode 100644
index 00000000..94f646f5
--- /dev/null
+++ b/doc/rtd/explanation/kernel-cmdline.rst
@@ -0,0 +1,69 @@
+.. _kernel_cmdline:
+
+Kernel command line
+*******************
+
+To allow an ephemeral, or otherwise pristine, image to receive some
+configuration, ``cloud-init`` will read a URL specified on the kernel command
+line and proceed as if its data had previously existed.
+
+This allows for configuring a metadata service, or some other data.
+
+.. note::
+ Usage of the kernel command line is somewhat of a last resort,
+ as it requires knowing in advance the correct command line or modifying
+ the boot loader to append data.
+
+For example, when :command:`cloud-init init --local` runs, it will check to
+see if ``cloud-config-url`` appears in key/value fashion in the kernel command
+line, as in:
+
+.. code-block:: text
+
+ root=/dev/sda ro cloud-config-url=http://foo.bar.zee/abcde
+
+``Cloud-init`` will then read the contents of the given URL. If the content
+starts with ``#cloud-config``, it will store that data to the local filesystem
+in a static filename :file:`/etc/cloud/cloud.cfg.d/91_kernel_cmdline_url.cfg`,
+and consider it as part of the config from that point forward.
+
+If that file exists already, it will not be overwritten, and the
+``cloud-config-url`` parameter is completely ignored.
+
+Then, when the datasource runs, it will find that config already available.
+
+So, to be able to configure the MAAS datasource by controlling the
+kernel command line from outside the image, you can append:
+
+.. code-block:: text
+
+ cloud-config-url=http://your.url.here/abcdefg
+
+Then, have the following content at that URL:
+
+.. code-block:: yaml
+
+ #cloud-config
+ datasource:
+ MAAS:
+ metadata_url: http://mass-host.localdomain/source
+ consumer_key: Xh234sdkljf
+ token_key: kjfhgb3n
+ token_secret: 24uysdfx1w4
+
+.. warning::
+
+   The ``url`` kernel command line key is deprecated.
+   Please use the ``cloud-config-url`` parameter instead.
+
+.. note::
+
+ Since ``cloud-config-url=`` is so generic, in order to avoid false
+ positives, ``cloud-init`` requires the content to start with
+ ``#cloud-config`` for it to be considered.
+
+.. note::
+
+   The ``cloud-config-url=`` fetch is an unauthenticated HTTP GET, and the
+   content may contain credentials. The URL could be randomly generated, and
+   the server could check the source address, in order to be more secure.
diff --git a/doc/rtd/topics/security.rst b/doc/rtd/explanation/security.rst
index ad934ded..c1ffd9ce 100644
--- a/doc/rtd/topics/security.rst
+++ b/doc/rtd/explanation/security.rst
@@ -1,6 +1,6 @@
-********
+.. _security:
+
Security
********
-.. _security:
.. mdinclude:: ../../../SECURITY.md
diff --git a/doc/rtd/explanation/vendordata.rst b/doc/rtd/explanation/vendordata.rst
new file mode 100644
index 00000000..621fcdeb
--- /dev/null
+++ b/doc/rtd/explanation/vendordata.rst
@@ -0,0 +1,71 @@
+.. _vendordata:
+
+Vendor data
+***********
+
+Overview
+========
+
+Vendor data is data provided by the entity that launches an instance (e.g.,
+the cloud provider). This data can be used to customise the image to fit into
+the particular environment it is being run in.
+
+Vendor data follows the same rules as user data, with the following
+caveats:
+
+1. Users have ultimate control over vendor data. They can disable its
+ execution or disable handling of specific parts of multi-part input.
+2. By default it only runs on first boot.
+3. Vendor data can be disabled by the user. If the use of vendor data is
+ required for the instance to run, then vendor data should not be used.
+4. User-supplied cloud-config is merged over cloud-config from vendor data.
+
+Users providing cloud-config data can use the ``#cloud-config-jsonp`` method
+to more finely control their modifications to the vendor-supplied
+cloud-config. For example, if both vendor and user have provided ``runcmd``
+then the default merge handler will cause the user's ``runcmd`` to override
+the one provided by the vendor. To append to ``runcmd``, the user could better
+provide multi-part input with a ``cloud-config-jsonp`` part like:
+
+.. code:: yaml
+
+ #cloud-config-jsonp
+ [{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
+
+Further, we strongly advise vendors to not "be evil". By evil, we mean any
+action that could compromise a system. Since users trust you, please take
+care to make sure that any vendor data is safe, atomic, idempotent and does
+not put your users at risk.
+
+Input formats
+=============
+
+``Cloud-init`` will download and cache to filesystem any vendor data that it
+finds. Vendor data is handled exactly like user data. This means that the
+vendor can supply multi-part input and have those parts acted on in the same
+way as with user data.
+
+The only differences are:
+
+* Vendor-data-defined scripts are stored in a different location than
+ user-data-defined scripts (to avoid namespace collision).
+* The user can disable part handlers via the cloud-config settings.
+ For example, to disable handling of 'part-handlers' in vendor data,
+ the user could provide user data like this:
+
+.. code:: yaml
+
+ #cloud-config
+ vendordata: {excluded: 'text/part-handler'}
+
+Examples
+========
+
+You can find examples in the examples subdirectory.
+
+Additionally, the :file:`tools` directory contains
+:file:`write-mime-multipart`, which can be used to easily generate MIME
+multi-part files from a list of input files. That data can then be given to
+an instance.
+
+See :command:`write-mime-multipart --help` for usage.
diff --git a/doc/rtd/googleaf254801a5285c31.html b/doc/rtd/googleaf254801a5285c31.html
new file mode 100644
index 00000000..b6030712
--- /dev/null
+++ b/doc/rtd/googleaf254801a5285c31.html
@@ -0,0 +1 @@
+google-site-verification: googleaf254801a5285c31.html \ No newline at end of file
diff --git a/doc/rtd/howto/bugs.rst b/doc/rtd/howto/bugs.rst
new file mode 100644
index 00000000..753d4545
--- /dev/null
+++ b/doc/rtd/howto/bugs.rst
@@ -0,0 +1,115 @@
+.. _reporting_bugs:
+
+Reporting bugs
+**************
+
+In this guide, we will show you how to:
+
+1) Collect logs to support your bug report.
+2) File bugs to the upstream ``cloud-init`` project via `Launchpad`_.
+3) Report issues for distro-specific packages.
+
+Collect logs
+============
+
+To aid in debugging, please collect the necessary logs. To do so, run the
+:command:`collect-logs` subcommand to produce a tarfile that you can easily
+upload:
+
+.. code-block:: shell-session
+
+ $ sudo cloud-init collect-logs
+
+Example output:
+
+.. code-block::
+
+ Wrote /home/ubuntu/cloud-init.tar.gz
+
+If your version of ``cloud-init`` does not have the :command:`collect-logs`
+subcommand, then please manually collect the base log files by running the
+following:
+
+.. code-block:: shell-session
+
+ $ sudo dmesg > dmesg.txt
+ $ sudo journalctl -o short-precise > journal.txt
+ $ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \
+ /var/log/cloud-init.log /var/log/cloud-init-output.log
+
+Report upstream bugs
+====================
+
+Bugs for upstream ``cloud-init`` are tracked using Launchpad. To file a bug:
+
+1. Collect the necessary debug logs as described above.
+2. `Create a Launchpad account`_ or login to your existing account.
+3. `Report an upstream cloud-init bug`_.
+
+If debug logs are not provided, you will be asked for them before any
+further time is spent debugging. If you are unable to obtain the required
+logs, please explain why in the bug.
+
+If your bug is for a specific distro using ``cloud-init``, please first
+consider reporting it with the upstream distro or confirm that it still occurs
+with the latest upstream ``cloud-init`` code. See the following section for
+details on specific distro reporting.
+
+Distro-specific issues
+======================
+
+For issues specific to your distro please use one of the following
+distro-specific reporting mechanisms:
+
+Ubuntu
+------
+
+To report a bug on Ubuntu use the :command:`ubuntu-bug` command on the affected
+system to automatically collect the necessary logs and file a bug on
+Launchpad:
+
+.. code-block:: shell-session
+
+ $ ubuntu-bug cloud-init
+
+If that does not work or is not an option, please collect the logs using the
+commands in the 'Collect logs' section above and then report the bug on the
+`Ubuntu bug tracker`_. Make sure to attach your collected logs!
+
+Debian
+------
+
+To file a bug against the Debian package of ``cloud-init`` please use the
+`Debian bug tracker`_ to file against 'Package: cloud-init'. See the
+`Debian bug reporting wiki`_ page for more details.
+
+Red Hat, CentOS and Fedora
+--------------------------
+
+To file a bug against the Red Hat or Fedora packages of ``cloud-init`` please
+use the `Red Hat bugzilla`_.
+
+SUSE and openSUSE
+-----------------
+
+To file a bug against the SUSE packages of ``cloud-init`` please use the
+`SUSE bugzilla`_.
+
+Arch Linux
+----------
+
+To file a bug against the Arch package of ``cloud-init`` please use the
+`Arch Linux Bugtracker`_. See the `Arch Linux bug reporting wiki`_ for more
+details.
+
+.. LINKS:
+.. _Launchpad: https://launchpad.net/
+.. _Create a Launchpad account: https://help.launchpad.net/YourAccount/NewAccount
+.. _Report an upstream cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
+.. _Ubuntu bug tracker: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+filebug
+.. _Debian bug tracker: https://bugs.debian.org/cgi-bin/pkgreport.cgi?pkg=cloud-init;dist=unstable
+.. _Debian bug reporting wiki: https://www.debian.org/Bugs/Reporting
+.. _Red Hat bugzilla: https://bugzilla.redhat.com/
+.. _SUSE bugzilla: https://bugzilla.suse.com/index.cgi
+.. _Arch Linux Bugtracker: https://bugs.archlinux.org/
+.. _Arch Linux bug reporting wiki: https://wiki.archlinux.org/index.php/Bug_reporting_guidelines
diff --git a/doc/rtd/howto/index.rst b/doc/rtd/howto/index.rst
new file mode 100644
index 00000000..23936fa9
--- /dev/null
+++ b/doc/rtd/howto/index.rst
@@ -0,0 +1,23 @@
+.. _howto_index:
+
+How-to guides
+*************
+
+If you have a specific goal in mind and are already familiar with the basics
+of ``cloud-init``, our how-to guides cover some of the more common operations
+and tasks that you may need to complete.
+
+They will help you to achieve a particular end result, but may require you to
+understand and adapt the steps to fit your specific requirements.
+
+-----
+
+How do I...?
+============
+
+.. toctree::
+ :maxdepth: 1
+
+ Test cloud-init locally before deploying <predeploy_testing.rst>
+ Change how often a module runs <module_run_frequency.rst>
+ Report a bug <bugs.rst>
diff --git a/doc/rtd/howto/module_run_frequency.rst b/doc/rtd/howto/module_run_frequency.rst
new file mode 100644
index 00000000..fc0f8921
--- /dev/null
+++ b/doc/rtd/howto/module_run_frequency.rst
@@ -0,0 +1,40 @@
+.. _module_frequency:
+
+How to change a module's run frequency
+**************************************
+
+You may want to change the default frequency at which a module runs, for
+example, to make the module run on every boot.
+
+To override the default frequency, you will need to modify the module
+list in :file:`/etc/cloud/cloud.cfg`:
+
+1. Change the module from a string (default) to a list.
+2. Set the first list item to the module name and the second item to the
+ frequency.
+
+Example
+=======
+
+The following example demonstrates how to log boot times to a file every boot.
+
+Update :file:`/etc/cloud/cloud.cfg`:
+
+.. code-block:: yaml
+ :name: /etc/cloud/cloud.cfg
+ :emphasize-lines: 3
+
+ cloud_final_modules:
+ # list shortened for brevity
+ - [phone-home, always]
+ - final-message
+ - power-state-change
+
+Your user data could then be:
+
+.. code-block:: yaml
+
+ #cloud-config
+ phone_home:
+ url: http://example.com/$INSTANCE_ID/
+ post: all
diff --git a/doc/rtd/howto/predeploy_testing.rst b/doc/rtd/howto/predeploy_testing.rst
new file mode 100644
index 00000000..783ef51e
--- /dev/null
+++ b/doc/rtd/howto/predeploy_testing.rst
@@ -0,0 +1,141 @@
+.. _predeploy_testing:
+
+How to test ``cloud-init`` locally before deploying
+***************************************************
+
+It's very likely that you will want to test ``cloud-init`` locally before
+deploying it to the cloud. Fortunately, there are several different virtual
+machine (VM) and container tools that are ideal for this sort of local
+testing.
+
+In this guide, we will show how to use three of the most popular tools:
+`Multipass`_, `LXD`_ and `QEMU`_.
+
+Multipass
+=========
+
+Multipass is a cross-platform tool for launching Ubuntu VMs across Linux,
+Windows, and macOS.
+
+When a user launches a Multipass VM, user data can be passed by adding the
+``--cloud-init`` flag and the appropriate YAML file containing the user data:
+
+.. code-block:: shell-session
+
+ $ multipass launch bionic --name test-vm --cloud-init userdata.yaml
+
+Multipass will validate the YAML syntax of the cloud-config file before
+attempting to start the VM. This is a nice addition that saves time when
+you're experimenting and launching instances with various cloud-configs.
+
+Multipass *only* supports passing user data, and *only* as YAML cloud-config
+files. Passing a script, a MIME archive, or any of the other user data formats
+``cloud-init`` supports will result in an error from the YAML syntax validator.
+
+LXD
+===
+
+LXD offers a streamlined user experience for using Linux system containers.
+With LXD, a user can pass:
+
+* user data,
+* vendor data,
+* metadata, and
+* network configuration.
+
+The following commands initialise a container with user data:
+
+.. code-block:: shell-session
+
+ $ lxc init ubuntu-daily:bionic test-container
+ $ lxc config set test-container user.user-data - < userdata.yaml
+ $ lxc start test-container
+
+To avoid the extra commands, this can also be done at launch:
+
+.. code-block:: shell-session
+
+ $ lxc launch ubuntu-daily:bionic test-container --config=user.user-data="$(cat userdata.yaml)"
+
+Finally, a profile can be set up with the specific data if you need to
+launch this multiple times:
+
+.. code-block:: shell-session
+
+ $ lxc profile create dev-user-data
+ $ lxc profile set dev-user-data user.user-data - < cloud-init-config.yaml
+ $ lxc launch ubuntu-daily:bionic test-container -p default -p dev-user-data
+
+The above examples all show how to pass user data. To pass other types of
+configuration data, use the config options listed below:
+
++----------------+---------------------------+
+| Data | Config option |
++================+===========================+
+| user data | cloud-init.user-data |
++----------------+---------------------------+
+| vendor data | cloud-init.vendor-data |
++----------------+---------------------------+
+| network config | cloud-init.network-config |
++----------------+---------------------------+
+
+See the LXD `Instance Configuration`_ docs for more info about configuration
+values or the LXD `Custom Network Configuration`_ document for more about
+custom network config.
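+
+For example, network configuration can be attached in the same way as user
+data, using the corresponding key from the table above (the file name here is
+illustrative):
+
+.. code-block:: shell-session
+
+   $ lxc init ubuntu-daily:bionic test-container
+   $ lxc config set test-container cloud-init.network-config - < network-config.yaml
+   $ lxc start test-container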
+
+QEMU
+====
+
+The :command:`cloud-localds` command from the `cloud-utils`_ package generates
+a disk with user-supplied data. The ``NoCloud`` datasource allows users to
+provide their own user data, metadata, or network configuration directly to
+an instance without running a network service. This is helpful for launching
+local cloud images with QEMU, for example.
+
+The following is an example of creating the local disk using the
+:command:`cloud-localds` command:
+
+.. code-block:: shell-session
+
+ $ cat >user-data <<EOF
+ #cloud-config
+ password: password
+ chpasswd:
+ expire: False
+ ssh_pwauth: True
+ ssh_authorized_keys:
+ - ssh-rsa AAAA...UlIsqdaO+w==
+ EOF
+ $ cloud-localds seed.img user-data
+
+The resulting :file:`seed.img` can then be passed along to a cloud image
+containing ``cloud-init``. Below is an example of passing the :file:`seed.img`
+with QEMU:
+
+.. code-block:: shell-session
+
+ $ qemu-system-x86_64 -m 1024 -net nic -net user \
+ -hda ubuntu-20.04-server-cloudimg-amd64.img \
+ -hdb seed.img
+
+The now-booted image will allow for login using the password provided above.
+
+Users can also provide more detailed configuration, including network
+configuration and metadata:
+
+.. code-block:: shell-session
+
+ $ cloud-localds --network-config=network-config-v2.yaml \
+ seed.img userdata.yaml metadata.yaml
+
+See the :ref:`network_config_v2` page for details on the format and config of
+network configuration. To learn more about the possible values for metadata,
+check out the :ref:`datasource_nocloud` page.
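+
+As a sketch, a minimal :file:`metadata.yaml` for local testing could contain
+just an instance ID and hostname (the values here are illustrative):
+
+.. code-block:: shell-session
+
+   $ cat >metadata.yaml <<EOF
+   instance-id: test-vm-01
+   local-hostname: test-vm
+   EOF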
+
+
+.. _Multipass: https://multipass.run/
+.. _LXD: https://linuxcontainers.org/
+.. _QEMU: https://www.qemu.org/
+.. _Instance Configuration: https://linuxcontainers.org/lxd/docs/master/instances
+.. _Custom Network Configuration: https://linuxcontainers.org/lxd/docs/master/cloud-init
+.. _cloud-utils: https://github.com/canonical/cloud-utils/
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 159113f4..81045051 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -1,101 +1,96 @@
.. _index:
-cloud-init Documentation
+Cloud-init documentation
########################
-Cloud-init is the *industry standard* multi-distribution method for
-cross-platform cloud instance initialization.
+``Cloud-init`` is the *industry standard* multi-distribution method for
+cross-platform cloud instance initialisation. It is supported across all major
+public cloud providers, provisioning systems for private cloud infrastructure,
+and bare-metal installations.
-During boot, cloud-init identifies the cloud it is running on and initializes
-the system accordingly. Cloud instances will automatically be provisioned
-during first boot with networking, storage, ssh keys, packages and various
-other system aspects already configured.
+During boot, ``cloud-init`` identifies the cloud it is running on and
+initialises the system accordingly. Cloud instances will automatically be
+provisioned during first boot with networking, storage, SSH keys, packages
+and various other system aspects already configured.
-Cloud-init provides the necessary glue between launching a cloud instance and
-connecting to it so that it works as expected.
+``Cloud-init`` provides the necessary glue between launching a cloud instance
+and connecting to it so that it works as expected.
-For cloud users, cloud-init provides no-install first-boot configuration
+For cloud users, ``cloud-init`` provides no-install first-boot configuration
management of a cloud instance. For cloud providers, it provides instance setup
that can be integrated with your cloud.
-Project and community
-*********************
-Cloud-init is an open source project that warmly welcomes community
-projects, contributions, suggestions, fixes and constructive feedback.
+-----
-* `Code of conduct <https://ubuntu.com/community/code-of-conduct>`_
-* Ask questions in IRC on ``#cloud-init`` on Libera
-* `Mailing list <https://launchpad.net/~cloud-init>`_
-* `Contribute on Github <https://github.com/canonical/cloud-init/blob/main/CONTRIBUTING.rst>`_
-* `Release schedule <https://discourse.ubuntu.com/search?q=cloud-init%20release%20schedule%20order%3Alatest>`_
+.. grid:: 1 1 2 2
+ :gutter: 3
-Having trouble? We would like to help!
-**************************************
+ .. grid-item-card:: **Tutorials**
+ :link: tutorial/index
+ :link-type: doc
-- Check out the :ref:`lxd_tutorial` if you're new to cloud-init
-- Try the :ref:`FAQ` for answers to some common questions
-- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
+ Get started - a hands-on introduction to ``cloud-init`` for new users
-.. toctree::
- :hidden:
- :titlesonly:
- :caption: Getting Started
+ .. grid-item-card:: **How-to guides**
+ :link: howto/index
+ :link-type: doc
- topics/tutorial.rst
- topics/availability.rst
- topics/boot.rst
- topics/cli.rst
- topics/faq.rst
- topics/bugs.rst
+ Step-by-step guides covering key operations and common tasks
-.. toctree::
- :hidden:
- :titlesonly:
- :caption: Explanation
+.. grid:: 1 1 2 2
+ :gutter: 3
+ :reverse:
- topics/configuration.rst
+ .. grid-item-card:: **Reference**
+ :link: reference/index
+ :link-type: doc
-.. toctree::
- :hidden:
- :titlesonly:
- :caption: User Data
+ Technical information - specifications, APIs, architecture
- topics/format.rst
- topics/examples.rst
- topics/events.rst
- topics/merging.rst
+ .. grid-item-card:: **Explanation**
+ :link: explanation/index
+ :link-type: doc
-.. toctree::
- :hidden:
- :titlesonly:
- :caption: Instance Data
+ Discussion and clarification of key topics
- topics/instancedata.rst
- topics/datasources.rst
- topics/vendordata.rst
- topics/network-config.rst
+-----
-.. toctree::
- :hidden:
- :titlesonly:
- :caption: Reference
+Having trouble? We would like to help!
+======================================
+
+- :ref:`Check out our tutorials<tutorial_index>` if you're new to
+ ``cloud-init``
+- :ref:`Try the FAQ<faq>` for answers to some common questions
+- You can also search the ``cloud-init`` `mailing list archive`_
+- Find a bug? `Report bugs on Launchpad`_
+
+Project and community
+=====================
+
+``Cloud-init`` is an open source project that warmly welcomes community
+projects, contributions, suggestions, fixes and constructive feedback.
- topics/base_config_reference.rst
- topics/modules.rst
+* Read our `Code of Conduct`_
+* Ask questions in the ``#cloud-init`` `IRC channel on Libera`_
+* Join the `cloud-init mailing list`_
+* :ref:`Contribute on GitHub<contributing>`
+* `Release schedule`_
.. toctree::
:hidden:
- :titlesonly:
- :caption: Development
-
- topics/contributing.rst
- topics/module_creation.rst
- topics/code_review.rst
- topics/security.rst
- topics/debugging.rst
- topics/logging.rst
- topics/dir_layout.rst
- topics/analyze.rst
- topics/docs.rst
- topics/testing.rst
- topics/integration_tests.rst
+ :maxdepth: 2
+
+ tutorial/index
+ howto/index
+ explanation/index
+ reference/index
+ development/index
+
+
+.. LINKS
+.. _Code of Conduct: https://ubuntu.com/community/code-of-conduct
+.. _IRC channel on Libera: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
+.. _cloud-init mailing list: https://launchpad.net/~cloud-init
+.. _mailing list archive: https://lists.launchpad.net/cloud-init/
+.. _Release schedule: https://discourse.ubuntu.com/t/cloud-init-release-schedule/32244
+.. _Report bugs on Launchpad: https://bugs.launchpad.net/cloud-init/+filebug
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/reference/availability.rst
index bf4de71d..b70342a5 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/reference/availability.rst
@@ -3,19 +3,19 @@
Availability
************
-Below outlines the current availability of cloud-init across
+This page outlines the current availability of ``cloud-init`` across
distributions and clouds, both public and private.
.. note::
- If a distribution or cloud does not show up in the list below contact
- them and ask for images to be generated using cloud-init!
+ If a distribution or cloud does not show up in the list below, contact
+ them and ask for images to be generated using ``cloud-init``!
Distributions
=============
-Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD,
-OpenBSD and DragonFlyBSD:
+``Cloud-init`` has support across all major Linux distributions, FreeBSD,
+NetBSD, OpenBSD and DragonFlyBSD:
- Alpine Linux
- Arch Linux
@@ -34,7 +34,7 @@ OpenBSD and DragonFlyBSD:
Clouds
======
-Cloud-init provides support across a wide ranging list of execution
+``Cloud-init`` provides support across a wide-ranging list of execution
environments in the public cloud:
- Amazon Web Services
@@ -62,7 +62,7 @@ environments in the public cloud:
- Zadara Edge Cloud Platform
- 3DS Outscale
-Additionally, cloud-init is supported on these private clouds:
+Additionally, ``cloud-init`` is supported on these private clouds:
- Bare metal installs
- OpenStack
@@ -70,5 +70,3 @@ Additionally, cloud-init is supported on these private clouds:
- KVM
- Metal-as-a-Service (MAAS)
- VMware
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/base_config_reference.rst b/doc/rtd/reference/base_config_reference.rst
index 97abcff5..928efc55 100644
--- a/doc/rtd/topics/base_config_reference.rst
+++ b/doc/rtd/reference/base_config_reference.rst
@@ -1,19 +1,18 @@
.. _base_config_reference:
-Base Configuration
+Base configuration
******************
.. warning::
- This documentation is intended for custom image creators, such as
- distros and cloud providers, not
- end users. Modifying the base configuration should not be necessary for
- end users and can result in a system that may be unreachable or
- may no longer boot.
+ This documentation is intended for custom image creators, such as distros
+ and cloud providers, not end users. Modifying the base configuration
+ should not be necessary for end users and can result in a system that may
+ be unreachable or may no longer boot.
-Cloud-init base config is primarily defined in two places:
+``Cloud-init`` base config is primarily defined in two places:
-* **/etc/cloud/cloud.cfg**
-* **/etc/cloud/cloud.cfg.d/*.cfg**
+* :file:`/etc/cloud/cloud.cfg`
+* :file:`/etc/cloud/cloud.cfg.d/*.cfg`
See the :ref:`configuration sources explanation<configuration>` for more
information on how these files get sourced and combined with other
@@ -22,34 +21,33 @@ configuration.
Generation
==========
-``cloud.cfg`` isn't present in any of cloud-init's source files. The
-`configuration is templated`_ and customized for each
-distribution supported by cloud-init.
+:file:`cloud.cfg` isn't present in any of ``cloud-init``'s source files. The
+`configuration is templated`_ and customised for each
+distribution supported by ``cloud-init``.
-Base Configuration Keys
+Base configuration keys
=======================
-Module Keys
+Module keys
-----------
Modules are grouped into the following keys:
-* **cloud_init_modules**: Modules run during
- :ref:`network<topics/boot:network>` timeframe.
-* **cloud_config_modules**: Modules run during
- :ref:`config<topics/boot:config>` timeframe.
-* **cloud_final_modules**: Modules run during
- :ref:`final<topics/boot:final>` timeframe.
+* ``cloud_init_modules``: Modules run during :ref:`network<boot-Network>`
+ timeframe.
+* ``cloud_config_modules``: Modules run during :ref:`config<boot-Config>`
+ timeframe.
+* ``cloud_final_modules``: Modules run during :ref:`final<boot-Final>`
+ timeframe.
-Each ``modules`` definition contains an array of strings, where each string
-is the name of the module. Each name is taken directly from the
-module filename,
-with the ``cc_`` prefix and ``.py`` suffix removed, and with
-``-`` and ``_`` being interchangeable.
+Each ``module`` definition contains an array of strings, where each string
+is the name of the module. Each name is taken directly from the module
+filename, with the ``cc_`` prefix and ``.py`` suffix removed, and with ``-``
+and ``_`` being interchangeable.
Alternatively, in place of the module name, an array of
-`<name>, <frequency>[, <args>]` args may be specified. See
-:ref:`the module creation guidelines<topics/module_creation:guidelines>` for
+``<name>, <frequency>[, <args>]`` args may be specified. See
+:ref:`the module creation guidelines<module_creation-Guidelines>` for
more information on ``frequency`` and ``args``.
.. note::
@@ -58,10 +56,9 @@ more information on ``frequency`` and ``args``.
frequency is **not** a recommended way to reduce instance boot time.
Examples
-^^^^^^^^
+--------
-To specify that only `cc_final_message.py`_ run during final
-timeframe:
+To specify that only `cc_final_message.py`_ runs during the final timeframe:
.. code-block:: yaml
@@ -83,9 +80,9 @@ user data):
cloud_final_modules:
- [final_message, once, "my final message"]
-.. _datasource_base_config:
+.. _base_config-Datasource:
-Datasource Keys
+Datasource keys
---------------
Many datasources allow configuration of the datasource for use in
@@ -100,126 +97,157 @@ generally take the form of:
<datasource_name>:
...
-System Info Keys
+System info keys
----------------
-These keys are used for setup of cloud-init itself, or the datasource
-or distro. Anything under the ``system_info`` cannot be overridden by
-vendor data, user data, or any other handlers or transforms. In some cases,
-there may be a ``system_info`` key used for the distro, while the same
-key is used outside of ``system_info`` for a userdata module.
+
+These keys are used for setup of ``cloud-init`` itself, or the datasource
+or distro. Anything under ``system_info`` cannot be overridden by vendor data,
+user data, or any other handlers or transforms. In some cases there may be a
+``system_info`` key used for the distro, while the same key is used outside of
+``system_info`` for a user data module.
Both keys will be processed independently.
-* **system_info**: Top-level key
+* ``system_info``: Top-level key.
- - **paths**: Definitions of common paths used by cloud-init
+ - ``paths``: Definitions of common paths used by ``cloud-init``.
- + **cloud_dir**: Defaults to ``/var/lib/cloud``
- + **templates_dir**: Defaults to ``/etc/cloud/templates``
+ + ``cloud_dir``: Defaults to :file:`/var/lib/cloud`.
+ + ``templates_dir``: Defaults to :file:`/etc/cloud/templates`.
- - **distro**: Name of distro being used.
- - **default_user**: Defines the default user for the system using the same
- user configuration as :ref:`topics/modules:users and groups`. Note that
+ - ``distro``: Name of distro being used.
+ - ``default_user``: Defines the default user for the system using the same
+ user configuration as :ref:`Users and Groups<mod-users_groups>`. Note that
this CAN be overridden if a ``users`` configuration
is specified without a ``- default`` entry.
- - **ntp_client**: The default ntp client for the distro. Takes the same
- form as ``ntp_client`` defined in :ref:`topics/modules:ntp`.
- - **package_mirrors**: Defines the package mirror info for apt.
- - **ssh_svcname**: The ssh service name. For most distros this will be
+ - ``ntp_client``: The default NTP client for the distro. Takes the same
+ form as ``ntp_client`` defined in :ref:`NTP<mod-ntp>`.
+ - ``package_mirrors``: Defines the package mirror info for apt.
+ - ``ssh_svcname``: The SSH service name. For most distros this will be
either ``ssh`` or ``sshd``.
- - **network**: Top-level key for distro-specific networking configuration
+ - ``network``: Top-level key for distro-specific networking configuration.
- + **renderers**: Prioritized list of networking configurations to try
+ + ``renderers``: Prioritised list of networking configurations to try
on this system. The first valid entry found will be used.
Options are:
- * **eni** - For /etc/network/interfaces
- * **network-manager**
- * **netplan**
- * **networkd** - For systemd-networkd
- * **freebsd**
- * **netbsd**
- * **openbsd**
+ * ``eni``: For :file:`/etc/network/interfaces`.
+ * ``network-manager``
+ * ``netplan``
+ * ``networkd``: For ``systemd-networkd``.
+ * ``freebsd``
+ * ``netbsd``
+ * ``openbsd``
- + **activators**: Prioritized list of networking tools to try to activate
+ + ``activators``: Prioritised list of networking tools to try to activate
network on this system. The first valid entry found will be used.
Options are:
- * **eni** - For ``ifup``/``ifdown``
- * **netplan** - For ``netplan generate``/``netplan apply``
- * **network-manager** - For ``nmcli connection load``/
- ``nmcli connection up``
- * **networkd** - For ``ip link set up``/``ip link set down``
+ * ``eni``: For ``ifup``/``ifdown``.
+ * ``netplan``: For ``netplan generate``/``netplan apply``.
+ * ``network-manager``: For ``nmcli connection load``/
+ ``nmcli connection up``.
+ * ``networkd``: For ``ip link set up``/``ip link set down``.
-Logging Keys
+Logging keys
------------
See :ref:`the logging explanation<logging>` for a comprehensive
-logging explanation. Note that cloud-init has a default logging
-definition that shouldn't need to be altered. It is defined on the
-instance at ``/etc/cloud/cloud.cfg.d/05_logging.cfg``.
+logging explanation. Note that ``cloud-init`` has a default logging
+definition that shouldn't need to be altered. It is defined on the instance
+at :file:`/etc/cloud/cloud.cfg.d/05_logging.cfg`.
The logging keys used in the base configuration are as follows:
-**logcfg**: A standard python `fileConfig`_ formatted log configuration.
+``logcfg``
+^^^^^^^^^^
+
+A standard python `fileConfig`_ formatted log configuration.
This is the primary logging configuration key and will take precedence over
-**log_cfgs** or **log_basic** keys.
+``log_cfgs`` or ``log_basic`` keys.
+
+``log_cfgs``
+^^^^^^^^^^^^
-**log_cfgs**: A list of logging configs in `fileConfig`_ format to apply
-when running cloud-init. Note that **log_cfgs** is used in
-``/etc/cloud.cfg.d/05_logging.cfg``.
+A list of logging configs in `fileConfig`_ format to apply
+when running ``cloud-init``. Note that ``log_cfgs`` is used in
+:file:`/etc/cloud/cloud.cfg.d/05_logging.cfg`.
-**log_basic**: Boolean value to determine if cloud-init should apply a
+``log_basic``
+^^^^^^^^^^^^^
+
+Boolean value to determine if ``cloud-init`` should apply a
basic default logging configuration if none has been provided. Defaults
-to ``true`` but only takes effect if **logcfg** or **log_cfgs** hasn't
+to ``true`` but only takes effect if ``logcfg`` or ``log_cfgs`` hasn't
been defined.
-**output**: If and how to redirect stdout/stderr. Defined in
-``/etc/cloud.cfg.d/05_logging.cfg`` and explained in
+``output``
+^^^^^^^^^^
+
+If and how to redirect ``stdout``/``stderr``. Defined in
+:file:`/etc/cloud/cloud.cfg.d/05_logging.cfg` and explained in
:ref:`the logging explanation<logging_command_output>`.
-**syslog_fix_perms**: Takes a list of ``<owner:group>`` strings and will set
-the owner of **def_log_file** accordingly.
+``syslog_fix_perms``
+^^^^^^^^^^^^^^^^^^^^
+
+Takes a list of ``<owner:group>`` strings and will set the owner of
+``def_log_file`` accordingly.
-**def_log_file**: Only used in conjunction with **syslog_fix_perms**.
+``def_log_file``
+^^^^^^^^^^^^^^^^
+
+Only used in conjunction with ``syslog_fix_perms``.
Specifies the filename to be used for setting permissions. Defaults
-to ``/var/log/cloud-init.log``.
+to :file:`/var/log/cloud-init.log`.
-Other Keys
+Other keys
----------
-**network**: The :ref:`network_config` to be applied to this instance.
+``network``
+^^^^^^^^^^^
+
+The :ref:`network configuration<network_config>` to be applied to this
+instance.
+
+``datasource_pkg_list``
+^^^^^^^^^^^^^^^^^^^^^^^
-**datasource_pkg_list**: Prioritized list of python packages to search when
-finding a datasource. Automatically includes ``cloudinit.sources``.
+Prioritised list of python packages to search when finding a datasource.
+Automatically includes ``cloudinit.sources``.
-**datasource_list**: Prioritized list of datasources that cloud-init will
-attempt to find on boot. By default, this will be defined in
-``/etc/cloud/cloud.cfg.d``. There are two primary use cases for modifying
-the datasource_list:
+``datasource_list``
+^^^^^^^^^^^^^^^^^^^
-1. Remove known invalid datasources. This may avoid long timeouts attempting
- to detect datasources on any system without a systemd-generator hook
- that invokes ds-identify.
+Prioritised list of datasources that ``cloud-init`` will attempt to find on
+boot. By default, this will be defined in :file:`/etc/cloud/cloud.cfg.d`. There
+are two primary use cases for modifying the ``datasource_list``:
+
+1. Remove known invalid datasources. This may avoid long timeouts when
+ attempting to detect datasources on any system without a systemd-generator
+ hook that invokes ``ds-identify``.
2. Override default datasource ordering to discover a different datasource
- type than would typically be prioritized.
+ type than would typically be prioritised.
+
+If ``datasource_list`` has only a single entry (or a single entry + ``None``),
+:ref:`cloud-init's generator script<boot-Generator>` will automatically assume
+and use this datasource without attempting detection.
-If **datasource_list** has only a single entry (or a single entry + ``None``),
-:ref:`cloud-init's generator script<topics/boot:generator>`
-will automatically assume and use this datasource without
-attempting detection.
+``vendor_data``/``vendor_data2``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-**vendor_data**/**vendor_data2**: Allows the user to disable ``vendor_data``
-or ``vendor_data2`` along with providing a prefix for any executed scripts.
+Allows the user to disable ``vendor_data`` or ``vendor_data2`` along with
+providing a prefix for any executed scripts.
Format is a dict with ``enabled`` and ``prefix`` keys:
-* **enabled**: Boolean indicating whether to enable or disable the vendor_data
-* **prefix**: A path to prepend to any vendor_data provided script
+* ``enabled``: A boolean indicating whether to enable or disable the
+ ``vendor_data``.
+* ``prefix``: A path to prepend to any ``vendor_data``-provided script.
Example
=======
-On an ubuntu system, ``/etc/cloud/cloud.cfg`` should look similar to:
+On an Ubuntu system, :file:`/etc/cloud/cloud.cfg` should look similar to:
.. code-block:: yaml
diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst
new file mode 100644
index 00000000..246b9721
--- /dev/null
+++ b/doc/rtd/reference/cli.rst
@@ -0,0 +1,416 @@
+.. _cli:
+
+CLI commands
+************
+
+For the latest list of subcommands and arguments use ``cloud-init``'s
+``--help`` option. This can be used against ``cloud-init`` itself, or on any
+of its subcommands.
+
+.. code-block:: shell-session
+
+ $ cloud-init --help
+
+Example output:
+
+.. code-block::
+
+ usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force]
+ {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema} ...
+
+ options:
+ -h, --help show this help message and exit
+ --version, -v Show program's version number and exit.
+ --file FILES, -f FILES
+ Use additional yaml configuration files.
+ --debug, -d Show additional pre-action logging (default: False).
+ --force Force running even if no datasource is found (use at your own risk).
+
+ Subcommands:
+ {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema}
+ init Initialize cloud-init and perform initial modules.
+ modules Activate modules using a given configuration key.
+ single Run a single module.
+ query Query standardized instance metadata from the command line.
+ dhclient-hook Run the dhclient hook to record network info.
+ features List defined features.
+ analyze Devel tool: Analyze cloud-init logs and data.
+ devel Run development tools.
+ collect-logs Collect and tar all cloud-init debug info.
+ clean Remove logs and artifacts so cloud-init can re-run.
+ status Report cloud-init status or wait on completion.
+ schema Validate cloud-config files using jsonschema.
+
+
+The rest of this document will give an overview of each of the subcommands.
+
+.. _cli_analyze:
+
+:command:`analyze`
+==================
+
+Get detailed reports of where ``cloud-init`` spends its time during the boot
+process. For a more complete reference, see :ref:`analyze`.
+
+Possible subcommands include:
+
+* :command:`blame`: report ordered by most costly operations.
+* :command:`dump`: machine-readable JSON dump of all ``cloud-init`` tracked
+ events.
+* :command:`show`: show time-ordered report of the cost of operations during
+ each boot stage.
+* :command:`boot`: show timestamps from kernel initialisation, kernel finish
+ initialisation, and ``cloud-init`` start.
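+
+For example, to see which operations were most costly during boot:
+
+.. code-block:: shell-session
+
+   $ cloud-init analyze blame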
+
+.. _cli_clean:
+
+:command:`clean`
+================
+
+Remove ``cloud-init`` artifacts from :file:`/var/lib/cloud` to simulate a clean
+instance. On reboot, ``cloud-init`` will re-run all stages as it did on
+first boot.
+
+* :command:`--logs`: Optionally remove all ``cloud-init`` log files in
+ :file:`/var/log/`.
+* :command:`--reboot`: Reboot the system after removing artifacts.
+* :command:`--machine-id`: Set :file:`/etc/machine-id` to ``uninitialized\n``
+ on this image for systemd environments. On distributions without systemd,
+  remove the file. This is best practice when cloning a golden image, to
+  ensure the next boot of that image auto-generates a unique machine ID.
+ `More details on machine-id`_.
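+
+For example, to remove artifacts and logs and immediately reboot into a fresh
+``cloud-init`` run:
+
+.. code-block:: shell-session
+
+   $ sudo cloud-init clean --logs --reboot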
+
+.. _cli_collect_logs:
+
+:command:`collect-logs`
+=======================
+
+Collect and tar ``cloud-init``-generated logs, data files, and system
+information for triage. This subcommand is integrated with apport.
+
+Logs collected include:
+
+* :file:`/var/log/cloud-init.log`
+* :file:`/var/log/cloud-init-output.log`
+* :file:`/run/cloud-init`
+* :file:`/var/lib/cloud/instance/user-data.txt`
+* ``cloud-init`` package version
+* ``dmesg`` output
+* ``journalctl`` output
+
+.. note::
+ Ubuntu users can file bugs using :command:`ubuntu-bug cloud-init` to
+ automatically attach these logs to a bug report.
+
+.. _cli_devel:
+
+:command:`devel`
+================
+
+Collection of development tools under active development. These tools will
+likely be promoted to top-level subcommands when stable.
+
+Do **NOT** rely on the output of these commands as they can and will change.
+
+Current subcommands:
+
+:command:`net-convert`
+----------------------
+
+Manually use ``cloud-init``'s network format conversion. Useful for testing
+configuration or testing changes to the network conversion logic itself.
+
+:command:`render`
+-----------------
+
+Use ``cloud-init``'s jinja template rendering to process **#cloud-config** or
+**custom-scripts**, injecting any variables from
+:file:`/run/cloud-init/instance-data.json`. It accepts a user data file
+containing the jinja template header ``## template: jinja`` and renders that
+content with any :file:`instance-data.json` variables present.
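+
+A minimal invocation might look like the following, where
+:file:`my-user-data.yaml` is an illustrative file starting with the
+``## template: jinja`` header described above:
+
+.. code-block:: shell-session
+
+   $ cloud-init devel render my-user-data.yaml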
+
+:command:`hotplug-hook`
+-----------------------
+
+Respond to newly added system devices by retrieving updated system metadata
+and bringing up/down the corresponding device. This command is intended to be
+called via a ``systemd`` service and is not considered user-accessible except
+for debugging purposes.
+
+.. _cli_features:
+
+:command:`features`
+===================
+
+Print out each feature supported. If ``cloud-init`` does not have the
+:command:`features` subcommand, it also does not support any features
+described in this document.
+
+.. code-block:: shell-session
+
+ $ cloud-init features
+
+Example output:
+
+.. code-block::
+
+ NETWORK_CONFIG_V1
+ NETWORK_CONFIG_V2
+
+
+.. _cli_init:
+
+:command:`init`
+===============
+
+Generally run by OS init systems to execute ``cloud-init``'s stages:
+*init* and *init-local*. See :ref:`boot_stages` for more info.
+Can be run on the command line, but is generally gated to run only once
+due to semaphores in :file:`/var/lib/cloud/instance/sem/` and
+:file:`/var/lib/cloud/sem`.
+
+* :command:`--local`: Run *init-local* stage instead of *init*.
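+
+For example, to run the *init-local* stage manually (subject to the semaphore
+gating described above):
+
+.. code-block:: shell-session
+
+   $ sudo cloud-init init --local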
+
+.. _cli_modules:
+
+:command:`modules`
+==================
+
+Generally run by OS init systems to execute ``modules:config`` and
+``modules:final`` boot stages. This executes cloud config :ref:`modules`
+configured to run in the Init, Config and Final stages. The modules are
+declared to run in various boot stages in the file
+:file:`/etc/cloud/cloud.cfg` under keys:
+
+* ``cloud_init_modules``
+* ``cloud_config_modules``
+* ``cloud_final_modules``
+
+Can be run on the command line, but each module is gated to run only once due
+to semaphores in :file:`/var/lib/cloud/`.
+
+* :command:`--mode [init|config|final]`: Run ``modules:init``,
+ ``modules:config`` or ``modules:final`` ``cloud-init`` stages.
+ See :ref:`boot_stages` for more info.
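+
+For example, to run the config-stage modules manually:
+
+.. code-block:: shell-session
+
+   $ sudo cloud-init modules --mode config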
+
+.. _cli_query:
+
+:command:`query`
+================
+
+Query standardised cloud instance metadata crawled by ``cloud-init`` and stored
+in :file:`/run/cloud-init/instance-data.json`. This is a convenience
+command-line interface to reference any cached configuration metadata that
+``cloud-init`` crawls when booting the instance. See :ref:`instance_metadata`
+for more info.
+
+* :command:`--all`: Dump all available instance data as JSON which can be
+ queried.
+* :command:`--instance-data`: Optional path to a different
+ :file:`instance-data.json` file to source for queries.
+* :command:`--list-keys`: List available query keys from cached instance data.
+* :command:`--format`: A string in jinja-template syntax that will be
+  rendered, replacing variable references with values from the instance data.
+* :command:`<varname>`: A dot-delimited variable path into the
+ :file:`instance-data.json` object.
+
+The following demonstrates how to list all top-level query keys that are
+standardised aliases:
+
+.. code-block:: shell-session
+
+ $ cloud-init query --list-keys
+
+Example output:
+
+.. code-block::
+
+ _beta_keys
+ availability_zone
+ base64_encoded_keys
+ cloud_name
+ ds
+ instance_id
+ local_hostname
+ platform
+ public_ssh_keys
+ region
+ sensitive_keys
+ subplatform
+ userdata
+ v1
+ vendordata
+
+Here are a few examples of how to query standardised metadata from clouds:
+
+.. code-block:: shell-session
+
+ $ cloud-init query v1.cloud_name
+
+Example output:
+
+.. code-block::
+
+ aws # or openstack, azure, gce etc.
+
+Any standardised ``instance-data`` under a <v#> key is aliased as a top-level
+key for convenience:
+
+.. code-block:: shell-session
+
+ $ cloud-init query cloud_name
+
+Example output:
+
+.. code-block::
+
+ aws # or openstack, azure, gce etc.
+
+One can also query datasource-specific metadata on EC2, e.g.:
+
+.. code-block:: shell-session
+
+ $ cloud-init query ds.meta_data.public_ipv4
+
+
+.. note::
+
+ The standardised instance data keys under **v#** are guaranteed not to
+ change behaviour or format. If using top-level convenience aliases for any
+   standardised instance data keys, the most recent value (highest **v#**) of
+   that key name is what is reported as the top-level value. So these aliases
+   act as a 'latest'.
+
+This data can then be formatted to generate custom strings or data. For
+example, we can generate a custom hostname FQDN based on ``instance-id``, cloud
+and region:
+
+.. code-block:: shell-session
+
+ $ cloud-init query --format 'custom-{{instance_id}}.{{region}}.{{v1.cloud_name}}.com'
+
+.. code-block::
+
+ custom-i-0e91f69987f37ec74.us-east-2.aws.com
+
+
+.. _cli_schema:
+
+:command:`schema`
+=================
+
+Validate cloud-config files using jsonschema.
+
+* :command:`-h, --help`: Show this help message and exit.
+* :command:`-c CONFIG_FILE, --config-file CONFIG_FILE`: Path of the
+ cloud-config YAML file to validate.
+* :command:`--system`: Validate the system cloud-config user data.
+* :command:`-d DOCS [cc_module ...], --docs DOCS [cc_module ...]`:
+  Print schema module docs. Choices are "all" or space-delimited
+  ``cc_names``.
+* :command:`--annotate`: Annotate existing cloud-config file with errors.
+
+The following example checks a config file and annotates it with any errors
+on ``stdout``.
+
+.. code-block:: shell-session
+
+ $ cloud-init schema -c ./config.yml --annotate
+
+
+.. _cli_single:
+
+:command:`single`
+=================
+
+Attempt to run a single, named, cloud config module.
+
+* :command:`--name`: The cloud-config module name to run.
+* :command:`--frequency`: Module frequency for this run.
+ One of (``always``|``once-per-instance``|``once``).
+* :command:`--report`: Enable reporting.
+
+The following example re-runs the ``cc_set_hostname`` module ignoring the
+module default frequency of ``once-per-instance``:
+
+.. code-block:: shell-session
+
+ $ cloud-init single --name set_hostname --frequency always
+
+.. note::
+
+ Mileage may vary trying to re-run each ``cloud-config`` module, as
+ some are not idempotent.
+
+.. _cli_status:
+
+:command:`status`
+=================
+
+Report whether ``cloud-init`` is running, done, disabled or errored. Exits
+non-zero if an error is detected in ``cloud-init``.
+
+* :command:`--long`: Detailed status information.
+* :command:`--wait`: Block until ``cloud-init`` completes.
+* :command:`--format [yaml|json|tabular]`: Machine-readable JSON or YAML
+ detailed output.
+
+The :command:`status` command can be used simply as follows:
+
+.. code-block:: shell-session
+
+ $ cloud-init status
+
+Which shows whether ``cloud-init`` is currently running, done, disabled, or in
+error, as in this example output:
+
+.. code-block::
+
+ status: running
+
+The :command:`--long` option, shown below, provides a more verbose output.
+
+.. code-block:: shell-session
+
+ $ cloud-init status --long
+
+Example output when ``cloud-init`` is running:
+
+.. code-block::
+
+ status: running
+ time: Fri, 26 Jan 2018 21:39:43 +0000
+ detail:
+ Running in stage: init-local
+
+Example output when ``cloud-init`` is done:
+
+.. code-block::
+
+ status: done
+ boot_status_code: enabled-by-generator
+ last_update: Tue, 16 Aug 2022 19:12:58 +0000
+ detail:
+ DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]
+
+The detailed output can be shown in machine-readable JSON or YAML with the
+:command:`--format` option, for example:
+
+.. code-block:: shell-session
+
+ $ cloud-init status --format=json
+
+Which would produce the following example output:
+
+.. code-block::
+
+ {
+ "boot_status_code": "enabled-by-generator",
+ "datasource": "nocloud",
+ "detail": "DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]",
+ "errors": [],
+ "last_update": "Tue, 16 Aug 2022 19:12:58 +0000",
+ "status": "done"
+ }
+
+.. _More details on machine-id: https://www.freedesktop.org/software/systemd/man/machine-id.html
diff --git a/doc/rtd/reference/datasources.rst b/doc/rtd/reference/datasources.rst
new file mode 100644
index 00000000..6db0053a
--- /dev/null
+++ b/doc/rtd/reference/datasources.rst
@@ -0,0 +1,134 @@
+.. _datasources:
+
+Datasources
+***********
+
+Datasources are sources of configuration data for ``cloud-init`` that typically
+come from the user (i.e., user data) or come from the cloud that created the
+configuration drive (i.e., metadata). Typical user data includes files,
+YAML, and shell scripts whereas typical metadata includes server name,
+instance id, display name, and other cloud specific details.
+
+Since there are multiple ways to provide this data (each cloud solution seems
+to prefer its own way), an abstract datasource class was created internally to
+provide a single way of accessing the different cloud systems' methods, with
+each cloud supported through its own subclass.
+
+Any metadata processed by ``cloud-init``'s datasources is persisted as
+:file:`/run/cloud-init/instance-data.json`. ``Cloud-init`` provides tooling to
+quickly introspect some of that data. See :ref:`instance_metadata` for more
+information.
+
+Known sources
+=============
+
+The following is a list of documents for each supported datasource:
+
+.. toctree::
+ :titlesonly:
+
+ datasources/aliyun.rst
+ datasources/altcloud.rst
+ datasources/ec2.rst
+ datasources/azure.rst
+ datasources/cloudsigma.rst
+ datasources/cloudstack.rst
+ datasources/configdrive.rst
+ datasources/digitalocean.rst
+ datasources/e24cloud.rst
+ datasources/exoscale.rst
+ datasources/fallback.rst
+ datasources/gce.rst
+ datasources/lxd.rst
+ datasources/maas.rst
+ datasources/nocloud.rst
+ datasources/nwcs.rst
+ datasources/opennebula.rst
+ datasources/openstack.rst
+ datasources/oracle.rst
+ datasources/ovf.rst
+ datasources/rbxcloud.rst
+ datasources/smartos.rst
+ datasources/upcloud.rst
+ datasources/vmware.rst
+ datasources/vultr.rst
+ datasources/zstack.rst
+
+Datasource creation
+===================
+
+The datasource objects have a few touch points with ``cloud-init``. If you
+are interested in adding a new datasource for your cloud platform you will
+need to take care of the following items:
+
+Identify a mechanism for positive identification of the platform
+----------------------------------------------------------------
+
+It is good practice for a cloud platform to positively identify itself to
+the guest. This allows the guest to make educated decisions based on the
+platform on which it is running. On the x86 and arm64 architectures, many
+clouds identify themselves through DMI data. For example, Oracle's public
+cloud provides the string ``'OracleCloud.com'`` in the DMI chassis-asset
+field.
+
+``Cloud-init``-enabled images produce a log file with details about the
+platform. Reading through this log in :file:`/run/cloud-init/ds-identify.log`
+may provide the information needed to uniquely identify the platform.
+If the log is not present, you can generate it by running
+:file:`./tools/ds-identify` from source, or the installed copy at
+:file:`/usr/lib/cloud-init/ds-identify`.
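+
+For example, on an installed system:
+
+.. code-block:: shell-session
+
+   $ sudo /usr/lib/cloud-init/ds-identify
+   $ less /run/cloud-init/ds-identify.log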
+
+The mechanism used to identify the platform will be required for the
+``ds-identify`` and datasource module sections below.
+
+Add datasource module :file:`cloudinit/sources/DataSource<CloudPlatform>.py`
+----------------------------------------------------------------------------
+
+It is suggested that you start by copying one of the simpler datasources
+such as ``DataSourceHetzner``.
+
+Add tests for datasource module
+-------------------------------
+
+Add a new file with some tests for the module to
+:file:`cloudinit/sources/test_<yourplatform>.py`. For example, see
+:file:`cloudinit/sources/tests/test_oracle.py`.
+
+Update ``ds-identify``
+----------------------
+
+In ``systemd`` systems, ``ds-identify`` is used to detect which datasource
+should be enabled, or if ``cloud-init`` should run at all. You'll need to
+make changes to :file:`tools/ds-identify`.
+
+Add tests for ``ds-identify``
+-----------------------------
+
+Add relevant tests in a new class to
+:file:`tests/unittests/test_ds_identify.py`. You can use ``TestOracle`` as
+an example.
+
+Add your datasource name to the built-in list of datasources
+------------------------------------------------------------
+
+Add your datasource module name to the end of the ``datasource_list``
+entry in :file:`cloudinit/settings.py`.
+
+Add your cloud platform to apport collection prompts
+----------------------------------------------------
+
+Update the list of cloud platforms in :file:`cloudinit/apport.py`. This list
+will be provided to the user who invokes :command:`ubuntu-bug cloud-init`.
+
+Enable datasource by default in Ubuntu packaging branches
+---------------------------------------------------------
+
+Ubuntu packaging branches contain a template file,
+:file:`debian/cloud-init.templates`, which ultimately sets the default
+``datasource_list`` when installed via package. This file needs updating when
+the commit gets into a package.
+
+Add documentation for your datasource
+-------------------------------------
+
+You should add a new file in :file:`doc/datasources/<cloudplatform>.rst`.
diff --git a/doc/rtd/reference/datasources/aliyun.rst b/doc/rtd/reference/datasources/aliyun.rst
new file mode 100644
index 00000000..5121f53d
--- /dev/null
+++ b/doc/rtd/reference/datasources/aliyun.rst
@@ -0,0 +1,111 @@
+.. _datasource_aliyun:
+
+Alibaba Cloud (AliYun)
+**********************
+
+The ``AliYun`` datasource reads data from Alibaba Cloud ECS. Support has been
+present in ``cloud-init`` since 0.7.9.
+
+Metadata service
+================
+
+The Alibaba Cloud metadata service is available at the well-known URL
+:file:`http://100.100.100.200/`. For more information, see the Alibaba Cloud
+ECS documentation on `metadata`_.
+
+Configuration
+=============
+
+The following configuration can be set for the datasource in system
+configuration (in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/`).
+
+An example configuration with the default values is provided below:
+
+.. code-block:: yaml
+
+ datasource:
+ AliYun:
+ metadata_urls: ["http://100.100.100.200"]
+ timeout: 50
+ max_wait: 120
+
+Versions
+--------
+
+Like the EC2 metadata service, Alibaba Cloud's metadata service provides
+versioned data under specific paths. As of April 2018, there are only
+``2016-01-01`` and ``latest`` versions.
+
+It is expected that the dated version will maintain a stable interface but
+``latest`` may change content at a future date.
+
+``Cloud-init`` uses the ``2016-01-01`` version.
+
+You can list the versions available to your instance with:
+
+.. code-block:: shell-session
+
+ $ curl http://100.100.100.200/
+
+Example output:
+
+.. code-block::
+
+ 2016-01-01
+ latest
+
+Metadata
+--------
+
+Instance metadata can be queried at
+:file:`http://100.100.100.200/2016-01-01/meta-data`:
+
+.. code-block:: shell-session
+
+ $ curl http://100.100.100.200/2016-01-01/meta-data
+
+Example output:
+
+.. code-block::
+
+ dns-conf/
+ eipv4
+ hostname
+ image-id
+ instance-id
+ instance/
+ mac
+ network-type
+ network/
+ ntp-conf/
+ owner-account-id
+ private-ipv4
+ public-keys/
+ region-id
+ serial-number
+ source-address
+ sub-private-ipv4-list
+ vpc-cidr-block
+ vpc-id
+
+Userdata
+--------
+
+If provided, user data will appear at
+:file:`http://100.100.100.200/2016-01-01/user-data`.
+If no user data is provided, this will return a 404.
+
+.. code-block:: shell-session
+
+ $ curl http://100.100.100.200/2016-01-01/user-data
+
+Example output:
+
+.. code-block::
+
+ #!/bin/sh
+ echo "Hello World."
+
+.. LINKS
+.. _metadata: https://www.alibabacloud.com/help/zh/faq-detail/49122.htm
diff --git a/doc/rtd/reference/datasources/altcloud.rst b/doc/rtd/reference/datasources/altcloud.rst
new file mode 100644
index 00000000..19233404
--- /dev/null
+++ b/doc/rtd/reference/datasources/altcloud.rst
@@ -0,0 +1,89 @@
+.. _datasource_alt_cloud:
+
+AltCloud
+*********
+
+The datasource AltCloud will be used to pick up user data on `RHEVm`_ and
+`vSphere`_.
+
+RHEVm
+=====
+
+For `RHEVm`_ v3.0 the user data is injected into the VM using floppy
+injection via the `RHEVm`_ dashboard "Custom Properties".
+
+The format of the "Custom Properties" entry must be: ::
+
+ floppyinject=user-data.txt:<base64 encoded data>
+
+For example, to pass a simple bash script:
+
+.. code-block:: sh
+
+ $ cat simple_script.bash
+ #!/bin/bash
+ echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt
+
+ $ base64 < simple_script.bash
+ IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
+
+To pass this example script to ``cloud-init`` running in a `RHEVm`_ v3.0 VM,
+set the "Custom Properties" when creating the RHEVm v3.0 VM to: ::
+
+ floppyinject=user-data.txt:IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
+
+.. note::
+ The prefix with file name must be: ``floppyinject=user-data.txt:``
+
+It is also possible to launch a `RHEVm`_ v3.0 VM and pass optional user
+data to it using the `Delta Cloud`_.
+
+vSphere
+=======
+
+For VMware's `vSphere`_, the user data is injected into the VM as an ISO
+via the CD-ROM. This can be done using the `vSphere`_ dashboard
+by connecting an ISO image to the CD/DVD drive.
+
+To pass this example script to ``cloud-init`` running in a `vSphere`_ VM,
+set the CD/DVD drive when creating the vSphere VM to point to an
+ISO on the data store.
+
+.. note::
+ The ISO must contain the user data.
+
+For example, to pass the same ``simple_script.bash`` to vSphere:
+
+Create the ISO
+--------------
+
+.. code-block:: sh
+
+ $ mkdir my-iso
+
+.. note::
+ The file name on the ISO must be: ``user-data.txt``
+
+.. code-block:: sh
+
+ $ cp simple_script.bash my-iso/user-data.txt
+ $ genisoimage -o user-data.iso -r my-iso
+
+Verify the ISO
+--------------
+
+.. code-block:: sh
+
+ $ sudo mkdir /media/vsphere_iso
+ $ sudo mount -o loop user-data.iso /media/vsphere_iso
+ $ cat /media/vsphere_iso/user-data.txt
+ $ sudo umount /media/vsphere_iso
+
+Then, launch the `vSphere`_ VM with the ISO ``user-data.iso`` attached as a
+CD-ROM.
+
+It is also possible to launch a `vSphere`_ VM and pass optional user
+data to it using the Delta Cloud.
+
+.. _RHEVm: https://www.redhat.com/virtualization/rhev/desktop/rhevm/
+.. _vSphere: https://www.vmware.com/products/datacenter-virtualization/vsphere/overview.html
+.. _Delta Cloud: http://deltacloud.apache.org
diff --git a/doc/rtd/reference/datasources/azure.rst b/doc/rtd/reference/datasources/azure.rst
new file mode 100644
index 00000000..5f8f3b84
--- /dev/null
+++ b/doc/rtd/reference/datasources/azure.rst
@@ -0,0 +1,133 @@
+.. _datasource_azure:
+
+Azure
+*****
+
+This datasource finds metadata and user data from the Azure cloud platform.
+
+The Azure cloud platform provides initial data to an instance via an attached
+CD formatted in UDF. This CD contains an :file:`ovf-env.xml` file that
+provides some information. Additional information is obtained via interaction
+with the "endpoint".
+
+IMDS
+====
+
+Azure provides the `instance metadata service (IMDS)`_, which is a REST service
+on ``169.254.169.254`` providing additional configuration information to the
+instance. ``Cloud-init`` uses the IMDS for:
+
+- Network configuration for the instance which is applied per boot.
+- A pre-provisioning gate which blocks instance configuration until Azure
+ fabric is ready to provision.
+- Retrieving SSH public keys. ``Cloud-init`` will first try to utilise SSH
+ keys returned from IMDS, and if they are not provided from IMDS then it will
+ fall back to using the OVF file provided from the CD-ROM. There is a large
+ performance benefit to using IMDS for SSH key retrieval, but in order to
+  support environments where IMDS is not available, ``cloud-init`` must
+  continue to allow for keys from the OVF.
+
+Configuration
+=============
+
+The following configuration can be set for the datasource in system
+configuration (in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+* :command:`apply_network_config`
+
+ Boolean set to True to use network configuration described by Azure's IMDS
+ endpoint instead of fallback network config of DHCP on eth0. Default is
+ True.
+* :command:`data_dir`
+
+ Path used to read metadata files and write crawled data.
+
+* :command:`disk_aliases`
+
+ A dictionary defining which device paths should be interpreted as ephemeral
+ images. See :ref:`cc_disk_setup <mod-disk_setup>` module for more info.
+
+Configuration for the datasource can also be read from a ``dscfg`` entry in
+the ``LinuxProvisioningConfigurationSet``. Content in ``dscfg`` node is
+expected to be base64 encoded YAML content, and it will be merged into the
+``'datasource: Azure'`` entry.
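+
+For example, a ``dscfg`` value could be produced with standard tools; the YAML
+shown here is only an illustrative override of the settings listed above:
+
+.. code-block:: shell-session
+
+   $ printf 'apply_network_config: false\n' | base64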
+
+An example configuration with the default values is provided below:
+
+.. code-block:: yaml
+
+ datasource:
+ Azure:
+ apply_network_config: true
+ data_dir: /var/lib/waagent
+ disk_aliases:
+ ephemeral0: /dev/disk/cloud/azure_resource
+
+
+User data
+=========
+
+User data is provided to ``cloud-init`` inside the :file:`ovf-env.xml` file.
+``Cloud-init`` expects that user data will be provided as a base64 encoded
+value inside the text child of an element named ``UserData`` or
+``CustomData``, which is a direct child of the
+``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``).
+
+If both ``UserData`` and ``CustomData`` are provided, it is undefined which of
+them will be selected. In the example below, the user data provided is
+``'this is my userdata'``.
+
+Example:
+
+.. code-block:: xml
+
+ <wa:ProvisioningSection>
+ <wa:Version>1.0</wa:Version>
+ <LinuxProvisioningConfigurationSet
+ xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+ <HostName>myHost</HostName>
+ <UserName>myuser</UserName>
+ <UserPassword/>
+ <CustomData>dGhpcyBpcyBteSB1c2VyZGF0YQ==</CustomData>
+ <dscfg>eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0=</dscfg>
+ <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication>
+ <SSH>
+ <PublicKeys>
+ <PublicKey>
+ <Fingerprint>6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7</Fingerprint>
+ <Path>this-value-unused</Path>
+ </PublicKey>
+ </PublicKeys>
+ </SSH>
+ </LinuxProvisioningConfigurationSet>
+ </wa:ProvisioningSection>
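+
+A value such as the ``CustomData`` above can be produced or verified with
+standard tools (a quick sketch; this is not Azure-specific):
+
+.. code-block:: sh
+
+   $ printf 'this is my userdata' | base64
+   dGhpcyBpcyBteSB1c2VyZGF0YQ==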
+
+HostName
+========
+
+When the user launches an instance, they provide a hostname for that instance.
+The hostname is provided to the instance in the :file:`ovf-env.xml` file as
+``HostName``.
+
+Whatever hostname the instance sends in its DHCP request will resolve within
+the domain returned in the DHCP 'search' option.
+
+A generic image will already have a hostname configured. The Ubuntu cloud
+images have ``ubuntu`` as the hostname of the system, and the initial DHCP
+request on eth0 is not guaranteed to occur after the datasource code has been
+run. So, on first boot, that initial value will be sent in the DHCP request
+and *that* value will resolve.
+
+In order to make the ``HostName`` provided in the :file:`ovf-env.xml`
+resolve, a DHCP request must be made with the new value. ``Cloud-init``
+handles this by setting the hostname in the datasource's ``get_data`` method
+via :command:`hostname $HostName`, and then bouncing the interface. This
+behaviour can be configured or disabled in the datasource config. See
+'Configuration' above.
+
+.. _instance metadata service (IMDS): https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
diff --git a/doc/rtd/reference/datasources/cloudsigma.rst b/doc/rtd/reference/datasources/cloudsigma.rst
new file mode 100644
index 00000000..50f255ef
--- /dev/null
+++ b/doc/rtd/reference/datasources/cloudsigma.rst
@@ -0,0 +1,38 @@
+.. _datasource_cloudsigma:
+
+CloudSigma
+**********
+
+This datasource finds metadata and user data from the `CloudSigma`_ cloud
+platform. Data transfer occurs through a virtual serial port of the
+`CloudSigma`_'s VM, and the presence of a network adapter is **NOT** a
+requirement. See `server context`_ in their public documentation for more
+information.
+
+Setting a hostname
+==================
+
+By default, the name of the server will be applied as a hostname on the first
+boot.
+
+Providing user data
+-------------------
+
+You can provide user data to the VM using the dedicated `meta field`_ in the
+`server context`_ ``cloudinit-user-data``. By default, *cloud-config* format
+is expected there, and the ``#cloud-config`` header can be omitted. However,
+since this is a raw-text field you could provide any of the valid :ref:`config
+formats<user_data_formats>`.
+
+You can also Base64-encode your user data. To do so, add the
+``cloudinit-user-data`` field name to the ``base64_fields`` meta field, which
+is a comma-separated list of all the meta fields whose values are
+Base64 encoded.
+
+If your user data does not need an internet connection you can create a
+`meta field`_ in the `server context`_ ``cloudinit-dsmode`` and set "local" as
+the value. If this field does not exist, the default value is "net".
+
+.. _CloudSigma: http://cloudsigma.com/
+.. _server context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
+.. _meta field: http://cloudsigma-docs.readthedocs.org/en/latest/meta.html
diff --git a/doc/rtd/reference/datasources/cloudstack.rst b/doc/rtd/reference/datasources/cloudstack.rst
new file mode 100644
index 00000000..2cf439e1
--- /dev/null
+++ b/doc/rtd/reference/datasources/cloudstack.rst
@@ -0,0 +1,66 @@
+.. _datasource_cloudstack:
+
+CloudStack
+**********
+
+`Apache CloudStack`_ exposes user data, metadata, user password, and account
+SSH key through the ``virtual router``. The datasource obtains the ``virtual
+router`` address via DHCP lease information given to the instance.
+For more details on metadata and user data, refer to the
+`CloudStack Administrator Guide`_.
+
+The following URLs provide access to user data and metadata from the Virtual
+Machine. ``data-server.`` is a well-known hostname provided by the CloudStack
+``virtual router`` that points to the next ``UserData`` server (which is
+usually also the ``virtual router``).
+
+.. code-block:: bash
+
+ http://data-server./latest/user-data
+ http://data-server./latest/meta-data
+ http://data-server./latest/meta-data/{metadata type}
+
+If ``data-server.`` cannot be resolved, ``cloud-init`` will try to obtain the
+``virtual router``'s address from the system's DHCP leases. If that fails,
+it will use the system's default gateway.
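+
+From inside an instance, these endpoints can be fetched directly. A sketch,
+assuming ``data-server.`` resolves as described above:
+
+.. code-block:: sh
+
+   $ curl http://data-server./latest/user-data
+   $ curl http://data-server./latest/meta-data/instance-id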
+
+Configuration
+=============
+
+The following configuration can be set for the datasource in system
+configuration (in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+* :command:`max_wait`
+
+ The maximum amount of clock time in seconds that should be spent searching
+ ``metadata_urls``. A value less than zero will result in only one request
+ being made, to the first in the list.
+
+ Default: 120
+
+* :command:`timeout`
+
+ The timeout value provided to ``urlopen`` for each individual http request.
+ This is used both when selecting a ``metadata_url`` and when crawling
+ the metadata service.
+
+ Default: 50
+
+Example
+-------
+
+An example configuration with the default values is provided below:
+
+.. code-block:: yaml
+
+ datasource:
+ CloudStack:
+ max_wait: 120
+ timeout: 50
+
+
+.. _Apache CloudStack: http://cloudstack.apache.org/
+.. _CloudStack Administrator Guide: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/virtual_machines.html#user-data-and-meta-data
diff --git a/doc/rtd/reference/datasources/configdrive.rst b/doc/rtd/reference/datasources/configdrive.rst
new file mode 100644
index 00000000..17913517
--- /dev/null
+++ b/doc/rtd/reference/datasources/configdrive.rst
@@ -0,0 +1,141 @@
+.. _datasource_config_drive:
+
+Config drive
+************
+
+The configuration drive datasource supports the `OpenStack`_ configuration
+drive disk.
+
+By default, ``cloud-init`` does *not* consider this source to be a
+fully-fledged datasource. Instead, the typical behaviour is to assume it is
+really only present to provide networking information. ``Cloud-init`` will
+copy the network information, apply it to the system, and then continue on.
+The "full" datasource could then be found in the EC2 metadata service. If
+this is not the case then the files contained on the located drive must
+provide equivalents to what the EC2 metadata service would provide (which is
+typical of the version 2 support listed below).
+
+.. note::
+ See `the config drive extension`_ and `metadata introduction`_ in the
+ public documentation for more information.
+
+.. dropdown:: Version 1 (deprecated)
+
+ **Note: Version 1 is legacy and should be considered deprecated.
+ Version 2 has been supported in OpenStack since 2012.2 (Folsom).**
+
+ The following criteria are required to use a config drive:
+
+ 1. Must be formatted with `vfat`_ filesystem.
+ 2. Must contain *one* of the following files: ::
+
+ /etc/network/interfaces
+ /root/.ssh/authorized_keys
+ /meta.js
+
+ ``/etc/network/interfaces``
+
+ This file is laid down by nova in order to pass static networking
+ information to the guest. ``Cloud-init`` will copy it off of the
+ config-drive and into /etc/network/interfaces (or convert it to RH
+ format) as soon as it can, and then attempt to bring up all network
+ interfaces.
+
+ ``/root/.ssh/authorized_keys``
+
+ This file is laid down by nova, and contains the SSH keys that were
+ provided to nova on instance creation (``nova boot --key ...``).
+
+ ``/meta.js``
+
+ meta.js is populated on the config-drive in response to the user
+ passing "meta flags" (nova boot --meta key=value ...). It is
+ expected to be json formatted.
+
+
+Version 2
+=========
+
+The following criteria are required to use a config drive:
+
+1. Must be formatted with `vfat`_ or `iso9660`_ filesystem, or have a
+ *filesystem* label of ``config-2`` or ``CONFIG-2``.
+2. The files that will typically be present in the config drive are: ::
+
+ openstack/
+ - 2012-08-10/ or latest/
+ - meta_data.json
+ - user_data (not mandatory)
+ - content/
+ - 0000 (referenced content files)
+ - 0001
+ - ....
+ ec2
+ - latest/
+ - meta-data.json (not mandatory)
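+
+If a config drive is attached, its contents can be inspected by mounting the
+device carrying the ``config-2`` label (a sketch; the mount point is
+arbitrary):
+
+.. code-block:: sh
+
+   $ sudo mount -o ro /dev/disk/by-label/config-2 /mnt
+   $ cat /mnt/openstack/latest/meta_data.json
+   $ sudo umount /mnt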
+
+Keys and values
+===============
+
+``Cloud-init``'s behaviour can be modified by keys found in the
+:file:`meta.js` (version 1 only) file in the following ways.
+
+``ds-mode``
+-----------
+
+::
+
+ dsmode:
+ values: local, net, pass
+ default: pass
+
+This indicates whether the config drive is a final datasource or not. By
+default it is 'pass', meaning this datasource should not be read. Set it to
+'local' or 'net' to stop ``cloud-init`` from continuing to search for other
+datasources after network configuration.
+
+The difference between 'local' and 'net' is that local will not require
+networking to be up before user-data actions (or boothooks) are run.
+
+``instance-id``
+---------------
+
+::
+
+ instance-id:
+ default: iid-dsconfigdrive
+
+This is utilised as the metadata's instance-id. It should generally
+be unique, as it is what is used to determine "is this a new instance?".
+
+``public-keys``
+---------------
+
+::
+
+ public-keys:
+ default: None
+
+If present, these keys will be used as the public keys for the
+instance. This value overrides the content in ``authorized_keys``.
+
+.. note::
+ It is likely preferable to provide keys via user data.
+
+``user-data``
+-------------
+
+::
+
+ user-data:
+ default: None
+
+This provides ``cloud-init`` user data. See :ref:`examples <yaml_examples>`
+for details of what needs to be present here.
+
+.. _OpenStack: http://www.openstack.org/
+.. _metadata introduction: https://docs.openstack.org/nova/latest/user/metadata.html#config-drives
+.. _python-novaclient: https://github.com/openstack/python-novaclient
+.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
+.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
+.. _the config drive extension: https://docs.openstack.org/nova/latest/admin/config-drive.html
diff --git a/doc/rtd/reference/datasources/digitalocean.rst b/doc/rtd/reference/datasources/digitalocean.rst
new file mode 100644
index 00000000..7283aac1
--- /dev/null
+++ b/doc/rtd/reference/datasources/digitalocean.rst
@@ -0,0 +1,32 @@
+.. _datasource_digital_ocean:
+
+DigitalOcean
+************
+
+The `DigitalOcean`_ datasource consumes the content served from DigitalOcean's
+metadata service. This metadata service serves information about the
+running droplet via http over the link local address ``169.254.169.254``. The
+metadata API endpoints are fully described in the DigitalOcean
+`metadata documentation`_.
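+
+From within a droplet, the service can be queried directly (a sketch; see the
+`metadata documentation`_ for the full API):
+
+.. code-block:: sh
+
+   $ curl http://169.254.169.254/metadata/v1/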
+
+Configuration
+=============
+
+DigitalOcean's datasource can be configured as follows: ::
+
+ datasource:
+ DigitalOcean:
+ retries: 3
+ timeout: 2
+
+* ``retries``
+
+ Specifies the number of times to attempt connection to the metadata service.
+
+* ``timeout``
+
+ Specifies the timeout (in seconds) to wait for a response from the
+ metadata service.
+
+.. _DigitalOcean: http://digitalocean.com/
+.. _metadata documentation: https://developers.digitalocean.com/metadata/
diff --git a/doc/rtd/reference/datasources/e24cloud.rst b/doc/rtd/reference/datasources/e24cloud.rst
new file mode 100644
index 00000000..e2c125db
--- /dev/null
+++ b/doc/rtd/reference/datasources/e24cloud.rst
@@ -0,0 +1,10 @@
+.. _datasource_e24cloud:
+
+E24Cloud
+********
+
+The `E24Cloud`_ platform provides an AWS EC2 metadata service clone. It
+identifies itself to guests using the DMI system-manufacturer field
+(:file:`/sys/class/dmi/id/sys_vendor`).
+
+.. _E24Cloud: https://www.e24cloud.com/en/
diff --git a/doc/rtd/reference/datasources/ec2.rst b/doc/rtd/reference/datasources/ec2.rst
new file mode 100644
index 00000000..7e2b522a
--- /dev/null
+++ b/doc/rtd/reference/datasources/ec2.rst
@@ -0,0 +1,153 @@
+.. _datasource_ec2:
+
+Amazon EC2
+**********
+
+The EC2 datasource is the oldest and most widely used datasource that
+``cloud-init`` supports. This datasource interacts with a *magic* IP provided
+to the instance by the cloud provider (typically this IP is
+``169.254.169.254``). At this IP a http server is provided to the
+instance so that the instance can make calls to get instance user data and
+instance metadata.
+
+Metadata is accessible via the following URL: ::
+
+ GET http://169.254.169.254/2009-04-04/meta-data/
+ ami-id
+ ami-launch-index
+ ami-manifest-path
+ block-device-mapping/
+ hostname
+ instance-id
+ instance-type
+ local-hostname
+ local-ipv4
+ placement/
+ public-hostname
+ public-ipv4
+ public-keys/
+ reservation-id
+ security-groups
+
+User data is accessible via the following URL: ::
+
+ GET http://169.254.169.254/2009-04-04/user-data
+ 1234,fred,reboot,true | 4512,jimbo, | 173,,,
+
+Note that there are multiple EC2 Metadata versions of this data provided
+to instances. ``Cloud-init`` attempts to use the most recent API version it
+supports in order to get the latest API features and instance-data. If a given
+API version is not exposed to the instance, those API features will be
+unavailable to the instance.
+
++----------------+----------------------------------------------------------+
+| EC2 version    | supported instance-data/feature                          |
++================+==========================================================+
+| **2021-03-23** | Required for Instance tag support. This feature must be  |
+|                | enabled individually on each instance. See the           |
+|                | `EC2 tags user guide`_.                                   |
++----------------+----------------------------------------------------------+
+| **2016-09-02** | Required for secondary IP address support.               |
++----------------+----------------------------------------------------------+
+| **2009-04-04** | Minimum supported EC2 API version for metadata and       |
+|                | user data.                                                |
++----------------+----------------------------------------------------------+
+
+To see which versions are supported by your cloud provider use the following
+URL: ::
+
+ GET http://169.254.169.254/
+ 1.0
+ 2007-01-19
+ 2007-03-01
+ 2007-08-29
+ 2007-10-10
+ 2007-12-15
+ 2008-02-01
+ 2008-09-01
+ 2009-04-04
+ ...
+ latest
+
+
+Configuration settings
+======================
+
+The following configuration can be set for the datasource in system
+configuration (in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+``metadata_urls``
+-----------------
+
+This list of URLs will be searched for an EC2 metadata service. The first
+entry that successfully returns a 200 response for
+``<url>/<version>/meta-data/instance-id`` will be selected.
+
+Default: ['http://169.254.169.254', 'http://[fd00:ec2::254]',
+'http://instance-data:8773'].
+
+``max_wait``
+------------
+
+The maximum amount of clock time in seconds that should be spent searching
+``metadata_urls``. A value less than zero will result in only one request
+being made, to the first in the list.
+
+Default: 120
+
+``timeout``
+-----------
+
+The timeout value provided to ``urlopen`` for each individual http request.
+This is used both when selecting a ``metadata_url`` and when crawling the
+metadata service.
+
+Default: 50
+
+``apply_full_imds_network_config``
+----------------------------------
+
+Boolean (default: True) to allow ``cloud-init`` to configure any secondary
+NICs and secondary IPs described by the metadata service. All network
+interfaces are configured with DHCP (v4) to obtain a primary IPv4 address and
+route. Interfaces which have a non-empty ``ipv6s`` list will also enable
+DHCPv6 to obtain a primary IPv6 address and route. The DHCP responses (v4 and
+v6) return an IP that matches the first element of ``local-ipv4s`` and
+``ipv6s`` lists respectively. All additional values (secondary addresses) in
+the static IP lists will be added to the interface.
+
+An example configuration with the default values is provided below:
+
+.. code-block:: yaml
+
+ datasource:
+ Ec2:
+ metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
+ max_wait: 120
+ timeout: 50
+ apply_full_imds_network_config: true
+
+Notes
+=====
+
+ * There are 2 types of EC2 instances, network-wise: Virtual Private
+ Cloud (VPC) ones and Classic ones (also known as non-VPC). One major
+ difference between them is that Classic instances have their MAC address
+ changed on stop/restart operations, so ``cloud-init`` will recreate the
+ network config file for EC2 Classic instances every boot. On VPC instances
+ this file is generated only on the first boot of the instance.
+ The check for the instance type is performed by the
+ ``is_classic_instance()`` method.
+
+ * For EC2 instances with multiple network interfaces (NICs) attached, DHCP4
+ will be enabled to obtain the primary private IPv4 address of those NICs.
+ Wherever DHCP4 or DHCP6 is enabled for a NIC, a DHCP route-metric will be
+ added with the value of ``<device-number + 1> * 100`` to ensure DHCP
+ routes on the primary NIC are preferred to any secondary NICs.
+ For example: the primary NIC will have a DHCP route-metric of 100,
+ the next NIC will have 200.
+
+.. _EC2 tags user guide: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS
diff --git a/doc/rtd/reference/datasources/exoscale.rst b/doc/rtd/reference/datasources/exoscale.rst
new file mode 100644
index 00000000..f6824b75
--- /dev/null
+++ b/doc/rtd/reference/datasources/exoscale.rst
@@ -0,0 +1,83 @@
+.. _datasource_exoscale:
+
+Exoscale
+********
+
+This datasource supports reading from the metadata server used on the
+`Exoscale platform`_. Use of the Exoscale datasource is recommended to benefit
+from new features of the Exoscale platform.
+
+The datasource relies on the availability of a compatible metadata server
+(``http://169.254.169.254`` is used by default) and its companion password
+server, reachable at the same address (by default on port 8080).
+
+Crawling of metadata
+====================
+
+The metadata service and password server are crawled slightly differently:
+
+* The "metadata service" is crawled every boot.
+* The password server is also crawled every boot (the Exoscale datasource
+ forces the password module to run with "frequency always").
+
+In the password server case, the following rules apply in order to enable the
+"restore instance password" functionality:
+
+* If a password is returned by the password server, it is then marked "saved"
+ by the ``cloud-init`` datasource. Subsequent boots will skip setting the
+ password (the password server will return ``saved_password``).
+* When the instance password is reset (via the Exoscale UI), the password
+ server will return the non-empty password at next boot, therefore causing
+ ``cloud-init`` to reset the instance's password.
+
+Configuration
+=============
+
+Users of this datasource are discouraged from changing the default settings
+unless instructed to by Exoscale support.
+
+The following settings are available and can be set for the
+:ref:`datasource base configuration<base_config-Datasource>`
+(in :file:`/etc/cloud/cloud.cfg.d/`).
+
+The settings available are:
+
+* ``metadata_url``: The URL for the metadata service.
+
+ Defaults to ``http://169.254.169.254``.
+
+* ``api_version``: The API version path on which to query the instance
+ metadata.
+
+ Defaults to ``1.0``.
+
+* ``password_server_port``: The port (on the metadata server) on which the
+ password server listens.
+
+ Defaults to ``8080``.
+
+* ``timeout``: The timeout value provided to ``urlopen`` for each individual
+ http request.
+
+ Defaults to ``10``.
+
+* ``retries``: The number of retries that should be done for a http request.
+
+ Defaults to ``6``.
+
+Example
+-------
+
+An example configuration with the default values is provided below:
+
+.. code-block:: yaml
+
+ datasource:
+ Exoscale:
+ metadata_url: "http://169.254.169.254"
+ api_version: "1.0"
+ password_server_port: 8080
+ timeout: 10
+ retries: 6
+
+.. _Exoscale platform: https://exoscale.com
diff --git a/doc/rtd/reference/datasources/fallback.rst b/doc/rtd/reference/datasources/fallback.rst
new file mode 100644
index 00000000..98283c0b
--- /dev/null
+++ b/doc/rtd/reference/datasources/fallback.rst
@@ -0,0 +1,19 @@
+.. _datasource_fallback:
+
+Fallback/no datasource
+**********************
+
+This is the fallback datasource when no other datasource can be selected. It
+is the equivalent of an empty datasource, in that it provides an empty string
+as user data, and an empty dictionary as metadata.
+
+It is useful for testing, as well as for occasions when you do not need an
+actual datasource to meet your instance requirements (i.e. you just want to
+run modules that are not concerned with any external data).
+
+It is typically placed at the end of the datasource search list so that, if
+no other datasource is matched, this one is used and the user is not left
+with an inaccessible instance.
+
+.. note::
+ The instance id that this datasource provides is ``iid-datasource-none``.
diff --git a/doc/rtd/reference/datasources/gce.rst b/doc/rtd/reference/datasources/gce.rst
new file mode 100644
index 00000000..5f0dc77b
--- /dev/null
+++ b/doc/rtd/reference/datasources/gce.rst
@@ -0,0 +1,52 @@
+.. _datasource_gce:
+
+Google Compute Engine
+*********************
+
+The GCE datasource gets its data from the internal compute metadata server.
+Metadata can be queried at the URL
+:file:`http://metadata.google.internal/computeMetadata/v1/`
+from within an instance. For more information see the `GCE metadata docs`_.
+
+Currently, the default project and instance level metadata keys
+``project/attributes/sshKeys`` and ``instance/attributes/ssh-keys`` are merged
+to provide ``public-keys``.
+
+``user-data`` and ``user-data-encoding`` can be provided to ``cloud-init`` by
+setting those custom metadata keys for an *instance*.
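+
+For example, ``user-data`` can be set on an existing instance (a sketch using
+the ``gcloud`` CLI; the instance name and file name are illustrative):
+
+.. code-block:: sh
+
+   $ gcloud compute instances add-metadata my-instance \
+       --metadata-from-file user-data=cloud-config.yaml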
+
+Configuration
+=============
+
+The following configuration can be set for the datasource in system
+configuration (in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+* ``retries``
+
+ The number of retries that should be attempted for a http request.
+ This value is used only after ``metadata_url`` is selected.
+
+ Default: 5
+
+* ``sec_between_retries``
+
+ The amount of wait time between retries when crawling the metadata service.
+
+ Default: 1
+
+Example
+-------
+
+An example configuration with the default values is provided below:
+
+.. code-block:: yaml
+
+ datasource:
+ GCE:
+ retries: 5
+ sec_between_retries: 1
+
+.. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata
diff --git a/doc/rtd/reference/datasources/lxd.rst b/doc/rtd/reference/datasources/lxd.rst
new file mode 100644
index 00000000..632f2669
--- /dev/null
+++ b/doc/rtd/reference/datasources/lxd.rst
@@ -0,0 +1,114 @@
+.. _datasource_lxd:
+
+LXD
+***
+
+The LXD datasource allows the user to provide custom user data,
+vendor data, metadata and network-config to the instance without running
+a network service (or even without having a network at all). This datasource
+performs HTTP GETs against the `LXD socket device`_ which is provided to each
+running LXD container and VM as ``/dev/lxd/sock`` and represents all
+instance-metadata as versioned HTTP routes such as:
+
+ - 1.0/meta-data
+ - 1.0/config/user.meta-data
+ - 1.0/config/user.vendor-data
+ - 1.0/config/user.user-data
+ - 1.0/config/user.<any-custom-key>
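+
+From inside a running instance, these routes can be queried over the socket
+directly (a sketch; the ``http://lxd`` host name is just a placeholder that
+``curl`` requires when using a Unix socket):
+
+.. code-block:: sh
+
+   $ curl --unix-socket /dev/lxd/sock http://lxd/1.0/meta-data
+   $ curl --unix-socket /dev/lxd/sock http://lxd/1.0/config/user.user-data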
+
+The LXD socket device ``/dev/lxd/sock`` is only present on containers and VMs
+when the instance configuration has ``security.devlxd=true`` (default).
+Disabling the ``security.devlxd`` configuration setting at initial launch will
+ensure that ``cloud-init`` uses the :ref:`datasource_nocloud` datasource.
+Disabling ``security.devlxd`` over the life of the container will result in
+warnings from ``cloud-init``, and ``cloud-init`` will keep the
+originally-detected LXD datasource.
+
+The LXD datasource is detected as viable by ``ds-identify`` during ``systemd``
+generator time when either ``/dev/lxd/sock`` exists, or
+``/sys/class/dmi/id/board_name`` matches "LXD".
+
+The LXD datasource provides ``cloud-init`` with the ability to react to
+metadata, vendor data, user data and network-config changes, and to render the
+updated configuration across a system reboot.
+
+To modify which metadata, vendor data or user data are provided to the
+launched container, use either LXD profiles or
+``lxc launch ... -c <key>="<value>"`` at initial container launch, by setting
+one of the following keys:
+
+- ``user.meta-data``: YAML metadata which will be appended to base metadata.
+- ``user.vendor-data``: YAML which overrides any metadata values.
+- ``user.network-config``: YAML representing either :ref:`network_config_v1` or
+ :ref:`network_config_v2` format.
+- ``user.user-data``: YAML which takes precedence and overrides both metadata
+ and vendor data values.
+- ``user.any-key``: Custom user configuration key and value pairs, which can
+ be passed to ``cloud-init``. Those keys/values will be present in
+ instance-data, which can be used by both ``#template: jinja`` #cloud-config
+ templates and the :command:`cloud-init query` command.
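+
+For example, user data can be supplied from a local file at launch time (a
+sketch; the image alias, instance name and file name are illustrative):
+
+.. code-block:: sh
+
+   $ lxc launch ubuntu:22.04 my-instance \
+       -c user.user-data="$(cat user-data.yaml)"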
+
+.. note::
+ LXD version 4.22 introduced a new scope of config keys prefaced by
+ ``cloud-init.``, which are preferred above the related ``user.*`` keys:
+
+ - ``cloud-init.meta-data``
+ - ``cloud-init.vendor-data``
+ - ``cloud-init.network-config``
+ - ``cloud-init.user-data``
+
+Configuration
+=============
+
+By default, network configuration from this datasource will be:
+
+.. code-block:: yaml
+
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: dhcp
+ control: auto
+
+This datasource is intended to replace :ref:`datasource_nocloud`
+datasource for LXD instances with a more direct support for LXD APIs instead
+of static NoCloud seed files.
+
+Hotplug
+=======
+
+Network hotplug functionality is supported for the LXD datasource as described
+in the :ref:`events` documentation. As hotplug functionality relies on the
+cloud-provided network metadata, the LXD datasource will only meaningfully
+react to a hotplug event if it has the configuration necessary to respond to
+the change. Practically, this means that even with hotplug enabled, **the
+default behavior for adding a new virtual NIC will result in no change**.
+
+To update the configuration to be used by hotplug, first pass the network
+configuration via the ``cloud-init.network-config`` (or
+``user.network-config`` on older versions).
+
+Example
+-------
+
+Given an LXD instance named ``my-lxd`` with hotplug enabled and
+an LXD bridge named ``my-bridge``, the following will allow for additional
+DHCP configuration of ``eth1``:
+
+.. code-block:: shell-session
+
+ $ cat /tmp/cloud-network-config.yaml
+ version: 2
+ ethernets:
+ eth0:
+ dhcp4: true
+ eth1:
+ dhcp4: true
+
+ $ lxc config set my-lxd cloud-init.network-config="$(cat /tmp/cloud-network-config.yaml)"
+ $ lxc config device add my-lxd eth1 nic name=eth1 nictype=bridged parent=my-bridge
+ Device eth1 added to my-lxd
+
+.. _LXD socket device: https://linuxcontainers.org/lxd/docs/master/dev-lxd
diff --git a/doc/rtd/topics/datasources/maas.rst b/doc/rtd/reference/datasources/maas.rst
index eb59dab9..eb59dab9 100644
--- a/doc/rtd/topics/datasources/maas.rst
+++ b/doc/rtd/reference/datasources/maas.rst
diff --git a/doc/rtd/reference/datasources/nocloud.rst b/doc/rtd/reference/datasources/nocloud.rst
new file mode 100644
index 00000000..682c8477
--- /dev/null
+++ b/doc/rtd/reference/datasources/nocloud.rst
@@ -0,0 +1,221 @@
+.. _datasource_nocloud:
+
+NoCloud
+*******
+
+The data source ``NoCloud`` allows the user to provide user data and metadata
+to the instance without running a network service (or even without having a
+network at all).
+
+You can provide metadata and user data to a local VM boot via files on a
+`vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be
+``cidata`` or ``CIDATA``.
+
+Alternatively, you can provide metadata via the kernel command line or SMBIOS
+"serial number" option. The data must be passed in the form of a string: ::
+
+ ds=nocloud[;key=val;key=val]
+
+or, ::
+
+ ds=nocloud-net[;key=val;key=val]
+
+Permitted keys
+==============
+
+The permitted keys are:
+
+* ``h`` or ``local-hostname``
+* ``i`` or ``instance-id``
+* ``s`` or ``seedfrom``
+
+With ``ds=nocloud``, the ``seedfrom`` value must start with ``/`` or
+``file://``. With ``ds=nocloud-net``, the ``seedfrom`` value must start
+with ``http://`` or ``https://`` and end with a trailing ``/``.
+
+Cloud-init performs variable expansion of the ``seedfrom`` URL for any DMI
+kernel variables present in :file:`/sys/class/dmi/id` (kenv on FreeBSD).
+Your ``seedfrom`` URL can contain variable names of the format
+``__dmi.varname__`` to indicate to the ``cloud-init`` NoCloud datasource that
+``dmi.varname`` should be expanded to the value of the DMI system attribute
+wanted.
+
+.. list-table:: Available DMI variables for expansion in ``seedfrom`` URL
+ :widths: 35 35 30
+ :header-rows: 0
+
+ * - ``dmi.baseboard-asset-tag``
+ - ``dmi.baseboard-manufacturer``
+ - ``dmi.baseboard-version``
+ * - ``dmi.bios-release-date``
+ - ``dmi.bios-vendor``
+ - ``dmi.bios-version``
+ * - ``dmi.chassis-asset-tag``
+ - ``dmi.chassis-manufacturer``
+ - ``dmi.chassis-serial-number``
+ * - ``dmi.chassis-version``
+ - ``dmi.system-manufacturer``
+ - ``dmi.system-product-name``
+ * - ``dmi.system-serial-number``
+ - ``dmi.system-uuid``
+ - ``dmi.system-version``
+
+For example, you can pass this option to QEMU: ::
+
+ -smbios type=1,serial=ds=nocloud-net;s=http://10.10.0.1:8000/__dmi.chassis-serial-number__/
+
+This will cause NoCloud to fetch the full metadata from a URL based on
+YOUR_SERIAL_NUMBER as seen in :file:`/sys/class/dmi/id/chassis_serial_number`
+(kenv on FreeBSD) from http://10.10.0.1:8000/YOUR_SERIAL_NUMBER/meta-data after
+the network initialisation is complete.
+
+File formats
+============
+
+These user data and metadata files are required as separate files at the
+same base URL: ::
+
+ /user-data
+ /meta-data
+
+Both files must be present for it to be considered a valid seed ISO.
+
+Basically, ``user-data`` is simply :ref:`user data<user_data_formats>` and
+``meta-data`` is a YAML-formatted file representing what you'd find in the EC2
+metadata service.
+
+You may also optionally provide a vendor data file adhering to
+:ref:`user data formats<user_data_formats>` at the same base URL: ::
+
+ /vendor-data
+
+Creating a disk
+===============
+
+Given an Ubuntu cloud image in :file:`disk.img`, you can create a
+sufficient disk by following the example below.
+
+1. Create the :file:`user-data` and :file:`meta-data` files that will be used
+ to modify the image on first boot.
+
+.. code-block:: sh
+
+ $ echo -e "instance-id: iid-local01\nlocal-hostname: cloudimg" > meta-data
+ $ echo -e "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
+
+2. At this stage you have three options:
+
+ a. Create a disk to attach with some user data and metadata:
+
+ .. code-block:: sh
+
+ $ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data
+
+ b. Alternatively, create a ``vfat`` filesystem with the same files:
+
+ .. code-block:: sh
+
+ $ truncate --size 2M seed.iso
+ $ mkfs.vfat -n cidata seed.iso
+
+ * 2b) Option 1: mount and copy files:
+
+ .. code-block:: sh
+
+ $ sudo mount -t vfat seed.iso /mnt
+ $ sudo cp user-data meta-data /mnt
+ $ sudo umount /mnt
+
+ * 2b) Option 2: the ``mtools`` package provides ``mcopy``, which can
+ access ``vfat`` filesystems without mounting them:
+
+ .. code-block::
+
+ $ mcopy -oi seed.iso user-data meta-data
+
+3. Create a new qcow image to boot, backed by your original image:
+
+.. code-block:: sh
+
+ $ qemu-img create -f qcow2 -b disk.img -F qcow2 boot-disk.img
+
+4. Boot the image and log in as "ubuntu" with password "passw0rd":
+
+.. code-block:: sh
+
+ $ kvm -m 256 \
+ -net nic -net user,hostfwd=tcp::2222-:22 \
+ -drive file=boot-disk.img,if=virtio \
+ -drive driver=raw,file=seed.iso,if=virtio
+
+.. note::
+ "passw0rd" was set as the password through the user data above; these
+ images have no default password.
+
+.. note::
+ The ``instance-id`` provided (``iid-local01`` above) is what is used to
+ determine if this is "first boot". So, if you are making updates to
+ user data you will also have to change the ``instance-id``, or start the
+ disk fresh.
+
+Also, you can inject an :file:`/etc/network/interfaces` file by providing the
+content for that file in the ``network-interfaces`` field of
+:file:`meta-data`.
+
+Example ``meta-data``
+---------------------
+
+::
+
+ instance-id: iid-abcdefg
+ network-interfaces: |
+ iface eth0 inet static
+ address 192.168.1.10
+ network 192.168.1.0
+ netmask 255.255.255.0
+ broadcast 192.168.1.255
+ gateway 192.168.1.254
+ hostname: myhost
+
+
+Network configuration can also be provided to ``cloud-init`` in either
+:ref:`network_config_v1` or :ref:`network_config_v2` by providing that
+YAML formatted data in a file named :file:`network-config`. If found,
+this file will override a :file:`network-interfaces` file.
+
+See an example below. Note specifically that this file does not
+have a top level ``network`` key as it is already assumed to
+be network configuration based on the filename.
+
+Example config
+--------------
+
+.. code-block:: yaml
+
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: "52:54:00:12:34:00"
+ subnets:
+ - type: static
+ address: 192.168.1.10
+ netmask: 255.255.255.0
+ gateway: 192.168.1.254
+
+
+.. code-block:: yaml
+
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ macaddress: "52:54:00:12:34:00"
+ set-name: interface0
+ addresses:
+ - 192.168.1.10/255.255.255.0
+ gateway4: 192.168.1.254
+
+
+.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
+.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
diff --git a/doc/rtd/reference/datasources/nwcs.rst b/doc/rtd/reference/datasources/nwcs.rst
new file mode 100644
index 00000000..19c9ddd6
--- /dev/null
+++ b/doc/rtd/reference/datasources/nwcs.rst
@@ -0,0 +1,28 @@
+.. _datasource_nwcs:
+
+NWCS
+****
+
+The NWCS datasource retrieves basic configuration values from the locally
+accessible metadata service. All data is served over HTTP from the address
+``169.254.169.254``.
+
+Configuration
+=============
+
+The NWCS datasource can be configured as follows: ::
+
+ datasource:
+ NWCS:
+ url: 'http://169.254.169.254'
+ retries: 3
+ timeout: 2
+ wait: 2
+
+* ``url``: The URL used to acquire the metadata configuration.
+* ``retries``: Determines the number of times to attempt to connect to the
+ metadata service.
+* ``timeout``: Determines the timeout (in seconds) to wait for a response
+ from the metadata service.
+* ``wait``: Determines how long (in seconds) to wait before retrying after a
+ failed request.
diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/reference/datasources/opennebula.rst
index 65570a53..2ad9d5c3 100644
--- a/doc/rtd/topics/datasources/opennebula.rst
+++ b/doc/rtd/reference/datasources/opennebula.rst
@@ -1,66 +1,61 @@
.. _datasource_opennebula:
OpenNebula
-==========
+**********
-The `OpenNebula`_ (ON) datasource supports the contextualization disk.
+The `OpenNebula`_ (ON) datasource supports the contextualisation disk.
- See `contextualization overview`_, `contextualizing VMs`_ and
- `network configuration`_ in the public documentation for
- more information.
+.. THESE LINKS ARE BROKEN
+.. See `contextualization overview`_, `contextualizing VMs`_ and
+.. `network configuration`_ in the public documentation for
+.. more information.
-OpenNebula's virtual machines are contextualized (parametrized) by
-CD-ROM image, which contains a shell script *context.sh* with
+OpenNebula's virtual machines are contextualised (parametrised) by
+CD-ROM image, which contains a shell script :file:`context.sh`, with
custom variables defined on virtual machine start. There are no
-fixed contextualization variables, but the datasource accepts
+fixed contextualisation variables, but the datasource accepts
many used and recommended across the documentation.
Datasource configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~
+========================
-Datasource accepts following configuration options.
-
-::
+Datasource accepts the following configuration options: ::
dsmode:
values: local, net, disabled
default: net
-Tells if this datasource will be processed in 'local' (pre-networking) or
-'net' (post-networking) stage or even completely 'disabled'.
+These specify whether the datasource will be processed in ``local``
+(pre-networking) stage, ``net`` (post-networking) stage or be ``disabled``.
::
parseuser:
default: nobody
-Unprivileged system user used for contextualization script
-processing.
+Unprivileged system user used for contextualisation script processing.
-Contextualization disk
-~~~~~~~~~~~~~~~~~~~~~~
+Contextualisation disk
+======================
The following criteria are required:
1. Must be formatted with `iso9660`_ filesystem
- or have a *filesystem* label of **CONTEXT** or **CDROM**
-2. Must contain file *context.sh* with contextualization variables.
- File is generated by OpenNebula, it has a KEY='VALUE' format and
- can be easily read by bash
+ or have a *filesystem* label of ``CONTEXT`` or ``CDROM``.
+2. Must contain the file :file:`context.sh` with contextualisation variables.
+ The file is generated by OpenNebula and has a ``KEY='VALUE'`` format that
+ can be easily read by bash.
-Contextualization variables
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Contextualisation variables
+===========================
-There are no fixed contextualization variables in OpenNebula, no standard.
-Following variables were found on various places and revisions of
+There are no fixed or standard contextualisation variables in OpenNebula.
+The following variables were found in various places and in revisions of
the OpenNebula documentation. Where multiple similar variables are
-specified, only first found is taken.
-
-::
-
- DSMODE
+specified, only the one found first is taken.
-Datasource mode configuration override. Values: local, net, disabled.
+* ``DSMODE``: Datasource mode configuration override. Values are: ``local``,
+ ``net``, ``disabled``.
::
@@ -94,9 +89,10 @@ Instance hostname.
IP_PUBLIC
ETH0_IP
-If no hostname has been specified, cloud-init will try to create hostname
-from instance's IP address in 'local' dsmode. In 'net' dsmode, cloud-init
-tries to resolve one of its IP addresses to get hostname.
+If no hostname has been specified, ``cloud-init`` will try to create a
+hostname from the instance's IP address in ``local`` dsmode. In ``net``
+dsmode, ``cloud-init`` tries to resolve one of its IP addresses to get
+the hostname.
::
@@ -110,13 +106,13 @@ One or multiple SSH keys (separated by newlines) can be specified.
USER_DATA
USERDATA
-cloud-init user data.
+``Cloud-init`` user data.
Example configuration
-~~~~~~~~~~~~~~~~~~~~~
+=====================
-This example cloud-init configuration (*cloud.cfg*) enables
-OpenNebula datasource only in 'net' mode.
+This example ``cloud-init`` configuration (:file:`cloud.cfg`) enables
+OpenNebula datasource only in ``net`` mode.
::
@@ -128,7 +124,7 @@ OpenNebula datasource only in 'net' mode.
parseuser: nobody
Example VM's context section
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+============================
.. code-block:: sh
@@ -153,4 +149,3 @@ Example VM's context section
.. _contextualizing VMs: http://opennebula.org/documentation:documentation:cong
.. _network configuration: http://opennebula.org/documentation:documentation:cong#network_configuration
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
-.. vi: textwidth=79
diff --git a/doc/rtd/reference/datasources/openstack.rst b/doc/rtd/reference/datasources/openstack.rst
new file mode 100644
index 00000000..f8fd2eb4
--- /dev/null
+++ b/doc/rtd/reference/datasources/openstack.rst
@@ -0,0 +1,128 @@
+.. _datasource_openstack:
+
+OpenStack
+*********
+
+This datasource supports reading data from the `OpenStack Metadata Service`_.
+
+Discovery
+=========
+
+To determine whether a platform looks like it may be OpenStack, ``cloud-init``
+checks the following environment attributes:
+
+* May be OpenStack **if**:
+
+ * ``non-x86 cpu architecture``: because DMI data is buggy on some arches.
+
+* Is OpenStack **if** x86 architecture and **ANY** of the following:
+
+ * ``/proc/1/environ``: ``Nova-lxd`` contains
+ ``product_name=OpenStack Nova``.
+ * ``DMI product_name``: Either ``Openstack Nova`` or ``OpenStack Compute``.
+ * ``DMI chassis_asset_tag`` is ``HUAWEICLOUD``, ``OpenTelekomCloud``,
+ ``SAP CCloud VM``, ``OpenStack Nova`` (since 19.2) or
+ ``OpenStack Compute`` (since 19.2).
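+
+When debugging detection, the DMI attributes mentioned above can be checked by
+hand (a sketch):
+
+.. code-block:: sh
+
+   $ cat /sys/class/dmi/id/product_name
+   $ cat /sys/class/dmi/id/chassis_asset_tag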
+
+Configuration
+=============
+
+The following configuration can be set for the datasource in system
+configuration (in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are as follows:
+
+``metadata_urls``
+-----------------
+
+This list of URLs will be searched for an OpenStack metadata service. The
+first entry that successfully returns a 200 response for ``<url>/openstack``
+will be selected.
+
+Default: ['http://169.254.169.254']
+
+``max_wait``
+------------
+
+The maximum amount of clock time (in seconds) that should be spent searching
+``metadata_urls``. A value less than zero will result in only one request
+being made, to the first in the list.
+
+Default: -1
+
+``timeout``
+-----------
+
+The timeout value provided to ``urlopen`` for each individual http request.
+This is used both when selecting a ``metadata_url`` and when crawling the
+metadata service.
+
+Default: 10
+
+``retries``
+-----------
+
+The number of retries that should be attempted for an http request. This
+value is used only after ``metadata_url`` is selected.
+
+Default: 5
+
+``apply_network_config``
+------------------------
+
+A boolean specifying whether to configure the network for the instance based
+on :file:`network_data.json` provided by the metadata service. When False,
+only configure DHCP on the primary NIC for this instance.
+
+Default: True
+
+Example configuration
+=====================
+
+An example configuration with the default values is provided below:
+
+.. code-block:: yaml
+
+ datasource:
+ OpenStack:
+ metadata_urls: ["http://169.254.169.254"]
+ max_wait: -1
+ timeout: 10
+ retries: 5
+ apply_network_config: True
+
+
+Vendor Data
+===========
+
+The OpenStack metadata server can be configured to serve up vendor data,
+which is available to all instances for consumption. OpenStack vendor data is
+generally a JSON object.
+
+``Cloud-init`` will look for configuration in the ``cloud-init`` attribute
+of the vendor data JSON object. ``Cloud-init`` processes this configuration
+using the same handlers as user data, so any formats that work for user
+data should work for vendor data.
+
+For example, configuring the following as vendor data in OpenStack would
+upgrade packages and install ``htop`` on all instances:
+
+.. code-block:: json
+
+ {"cloud-init": "#cloud-config\npackage_upgrade: True\npackages:\n - htop"}
+
+For more general information about how ``cloud-init`` handles vendor data,
+including how it can be disabled by users on instances, see our
+:ref:`explanation topic<vendordata>`.
+
+OpenStack can also be configured to provide "dynamic vendordata"
+which is provided by the DynamicJSON provider and appears under a
+different metadata path, :file:`/vendor_data2.json`.
+
+``Cloud-init`` will look for a ``cloud-init`` entry at the
+:file:`vendor_data2` path; if found, its settings are applied after (and
+hence override) the settings from static vendor data. Both sets of vendor
+data can be overridden
+by user data.
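+
+Both documents can be inspected from inside an instance (a sketch; these
+paths are served by the OpenStack metadata service):
+
+.. code-block:: sh
+
+   $ curl http://169.254.169.254/openstack/latest/vendor_data.json
+   $ curl http://169.254.169.254/openstack/latest/vendor_data2.json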
+
+.. _OpenStack Metadata Service: https://docs.openstack.org/nova/latest/admin/metadata-service.html
diff --git a/doc/rtd/reference/datasources/oracle.rst b/doc/rtd/reference/datasources/oracle.rst
new file mode 100644
index 00000000..74bfb3e3
--- /dev/null
+++ b/doc/rtd/reference/datasources/oracle.rst
@@ -0,0 +1,53 @@
+.. _datasource_oracle:
+
+Oracle
+******
+
+This datasource reads metadata, vendor data and user data from
+`Oracle Compute Infrastructure`_ (OCI).
+
+Oracle platform
+===============
+
+OCI provides bare metal and virtual machines. In both cases, the platform
+identifies itself via DMI data in the chassis asset tag with the string
+``'OracleCloud.com'``.
+
+Oracle's platform provides a metadata service that mimics the ``2013-10-17``
+version of OpenStack metadata service. Initially, support for Oracle was done
+via the OpenStack datasource.
+
+``Cloud-init`` has a specific datasource for Oracle in order to:
+
+a. Allow and support the future growth of the OCI platform.
+b. Address small differences between OpenStack and Oracle metadata
+ implementation.
+
+Configuration
+=============
+
+The following configuration can be set for the datasource in system
+configuration (in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/`).
+
+``configure_secondary_nics``
+----------------------------
+
+A boolean, defaulting to False. If set to True on an OCI Virtual Machine,
+``cloud-init`` will fetch networking metadata from Oracle's IMDS and use it
+to configure the non-primary network interface controllers in the system. If
+set to True on an OCI Bare Metal Machine, it will have no effect (though this
+may change in the future).
+
+Example configuration
+---------------------
+
+An example configuration with the default values is provided below:
+
+.. code-block:: yaml
+
+ datasource:
+ Oracle:
+ configure_secondary_nics: false
+
+.. _Oracle Compute Infrastructure: https://cloud.oracle.com/
diff --git a/doc/rtd/reference/datasources/ovf.rst b/doc/rtd/reference/datasources/ovf.rst
new file mode 100644
index 00000000..5a7af4fc
--- /dev/null
+++ b/doc/rtd/reference/datasources/ovf.rst
@@ -0,0 +1,12 @@
+.. _datasource_ovf:
+
+OVF
+***
+
+The OVF datasource provides a datasource for reading data from an
+`Open Virtualization Format`_ ISO transport.
+
+For further information see a full working example in ``cloud-init``'s
+source code tree in :file:`doc/sources/ovf`.
+
+.. _Open Virtualization Format: https://en.wikipedia.org/wiki/Open_Virtualization_Format
diff --git a/doc/rtd/reference/datasources/rbxcloud.rst b/doc/rtd/reference/datasources/rbxcloud.rst
new file mode 100644
index 00000000..44b30b3e
--- /dev/null
+++ b/doc/rtd/reference/datasources/rbxcloud.rst
@@ -0,0 +1,23 @@
+.. _datasource_rbx:
+
+Rbx Cloud
+*********
+
+The Rbx datasource consumes the metadata drive available on the `HyperOne`_
+and `Rootbox`_ platforms.
+
+This datasource supports network configurations, hostname, user accounts and
+user metadata.
+
+Metadata drive
+==============
+
+Drive metadata is a `FAT`_-formatted partition with the ``CLOUDMD`` or
+``cloudmd`` label on the system disk. Its contents are refreshed each time
+the virtual machine is restarted, if the partition exists. For more information
+see `HyperOne Virtual Machine docs`_.
+
+.. _HyperOne: http://www.hyperone.com/
+.. _Rootbox: https://rootbox.com/
+.. _HyperOne Virtual Machine docs: http://www.hyperone.com/
+.. _FAT: https://en.wikipedia.org/wiki/File_Allocation_Table
diff --git a/doc/rtd/reference/datasources/smartos.rst b/doc/rtd/reference/datasources/smartos.rst
new file mode 100644
index 00000000..fba87931
--- /dev/null
+++ b/doc/rtd/reference/datasources/smartos.rst
@@ -0,0 +1,181 @@
+.. _datasource_smartos:
+
+SmartOS Datasource
+******************
+
+This datasource finds metadata and user data from the SmartOS virtualisation
+platform (i.e., Joyent).
+
+Please see http://smartos.org/ for information about SmartOS.
+
+SmartOS platform
+================
+
+The SmartOS virtualisation platform provides metadata to the instance via the
+second serial console. On Linux, this is :file:`/dev/ttyS1`. The data is
+provided via a simple protocol:
+
+* a client queries for the data,
+* the console responds with a status, and
+* on "SUCCESS", the data is returned, terminated by a single ".\n".
+
+New versions of the SmartOS tooling will include support for Base64-encoded
+data.
+
+Metadata channels
+=================
+
+``Cloud-init`` supports three modes of delivering user data and metadata via
+the flexible channels of SmartOS.
+
+1. User data is written to :file:`/var/db/user-data`:
+
+ - As per the spec, user data is for consumption by the end user, not
+ provisioning tools.
+ - ``Cloud-init`` ignores this channel, other than writing it to disk.
+ - Removal of the ``meta-data`` key means that :file:`/var/db/user-data`
+ gets removed.
+ - A backup of previous metadata is maintained as
+ :file:`/var/db/user-data.<timestamp>`. ``<timestamp>`` is the epoch time
+ when ``cloud-init`` ran.
+
+2. ``user-script`` is written to
+ :file:`/var/lib/cloud/scripts/per-boot/99_user_script`:
+
+ - This is executed each boot.
+ - A link is created to :file:`/var/db/user-script`.
+ - Previous versions of ``user-script`` are written to
+ :file:`/var/lib/cloud/scripts/per-boot.backup/99_user_script.<timestamp>.`
+ - <timestamp> is the epoch time when ``cloud-init`` ran.
+ - When the ``user-script`` metadata key goes missing, ``user-script`` is
+ removed from the file system, although a backup is maintained.
+ - If the script does not start with a shebang (``#!<executable>``), or it
+ is not executable, ``cloud-init`` will add a shebang of "#!/bin/bash".
+
+3. ``Cloud-init`` user data is treated like on other Clouds.
+
+ - This channel is used for delivering *all* ``cloud-init`` instructions.
+ - Scripts delivered over this channel must be well formed (i.e., they must
+ have a shebang).
+
+``Cloud-init`` supports reading the traditional metadata fields supported by
+the SmartOS tools. These are:
+
+* ``root_authorized_keys``
+* ``hostname``
+* ``enable_motd_sys_info``
+* ``iptables_disable``
+
+.. note::
+ At this time, ``iptables_disable`` and ``enable_motd_sys_info`` are read
+ but are not actioned.
+
+Disabling ``user-script``
+=========================
+
+``Cloud-init`` uses the per-boot script functionality to handle the execution
+of the ``user-script``. If you want to prevent this, use a cloud-config of:
+
+.. code-block:: yaml
+
+ #cloud-config
+ cloud_final_modules:
+ - scripts-per-once
+ - scripts-per-instance
+ - scripts-user
+ - ssh-authkey-fingerprints
+ - keys-to-console
+ - phone-home
+ - final-message
+ - power-state-change
+
+Alternatively you can use the JSON patch method:
+
+.. code-block:: yaml
+
+ #cloud-config-jsonp
+ [
+ { "op": "replace",
+ "path": "/cloud_final_modules",
+ "value": ["scripts-per-once",
+ "scripts-per-instance",
+ "scripts-user",
+ "ssh-authkey-fingerprints",
+ "keys-to-console",
+ "phone-home",
+ "final-message",
+ "power-state-change"]
+ }
+ ]
+
+The default cloud-config includes "scripts-per-boot". ``Cloud-init`` will still
+ingest and write the user data, but will not execute it when you disable
+the per-boot script handling.
+
+The cloud-config needs to be delivered over the ``cloud-init:user-data``
+channel in order for ``cloud-init`` to ingest it.
+
+.. note::
+ Unless you have an explicit use-case, it is recommended that you do not
+ disable the per-boot script execution, especially if you are using
+ any of the life-cycle management features of SmartOS.
+
+Base64
+======
+
+The following are exempt from Base64 encoding, owing to the fact that they
+are provided by SmartOS:
+
+* ``root_authorized_keys``
+* ``enable_motd_sys_info``
+* ``iptables_disable``
+* ``user-data``
+* ``user-script``
+
+This list can be changed through the
+:ref:`datasource base configuration<base_config-Datasource>` variable
+``no_base64_decode``.
+
+This means that ``user-script``, ``user-data`` and other values can be Base64
+encoded. Since ``cloud-init`` can only guess whether or not something
+is truly Base64 encoded, the following metadata keys are hints as to whether
+or not to Base64 decode something:
+
+* ``base64_all``: Except for excluded keys, attempt to Base64 decode the
+ values. If a value fails to decode properly, it will be returned as plain
+ text.
+* ``base64_keys``: A comma-delimited list of which keys are Base64 encoded.
+* ``b64-<key>``: For any key, if an entry exists in the metadata for
+ ``'b64-<key>'``, then ``'b64-<key>'`` is expected to be a plain-text boolean
+ indicating whether or not its value is encoded.
+* ``no_base64_decode``: This is a configuration setting
+ (i.e., :file:`/etc/cloud/cloud.cfg.d`) that sets which values should not
+ be Base64 decoded.
+
+``disk_aliases`` and ephemeral disk
+===================================
+
+By default, SmartOS only supports a single ephemeral disk. That disk is
+completely empty (un-partitioned, with no filesystem).
+
+The SmartOS datasource has built-in cloud-config which instructs the
+``disk_setup`` module to partition and format the ephemeral disk.
+
+You can control the ``disk_setup`` in 2 ways:
+
+1. Through the datasource config, you can change the 'alias' of ``ephemeral0``
+ to reference another device. The default is:
+
+ .. code-block::
+
+ 'disk_aliases': {'ephemeral0': '/dev/vdb'}
+
+ This means that wherever ``disk_setup`` sees a device named 'ephemeral0',
+ :file:`/dev/vdb` will be substituted.
+
+2. You can provide ``disk_setup`` or ``fs_setup`` data in ``user-data`` to
+ overwrite the datasource's built-in values.
+
+See :file:`doc/examples/cloud-config-disk-setup.txt` for information on
+``disk_setup``.
diff --git a/doc/rtd/reference/datasources/upcloud.rst b/doc/rtd/reference/datasources/upcloud.rst
new file mode 100644
index 00000000..21b95922
--- /dev/null
+++ b/doc/rtd/reference/datasources/upcloud.rst
@@ -0,0 +1,22 @@
+.. _datasource_upcloud:
+
+UpCloud
+*******
+
+The `UpCloud`_ datasource consumes information from UpCloud's `metadata
+service`_. This metadata service serves information about the
+running server via HTTP over the address ``169.254.169.254`` available in
+every DHCP-configured interface. The metadata API endpoints are fully
+described in `UpCloud API documentation`_.
+
+Providing user data
+===================
+
+When creating a server, user data is provided by specifying it as
+``user_data`` in the API or via the server creation tool in the control panel.
+User data is immutable during the server's lifetime, and can be removed by
+deleting the server.
+
+.. _UpCloud: https://upcloud.com/
+.. _metadata service: https://upcloud.com/community/tutorials/upcloud-metadata-service/
+.. _UpCloud API documentation: https://developers.upcloud.com/1.3/8-servers/#metadata-service
diff --git a/doc/rtd/reference/datasources/vmware.rst b/doc/rtd/reference/datasources/vmware.rst
new file mode 100644
index 00000000..5e060769
--- /dev/null
+++ b/doc/rtd/reference/datasources/vmware.rst
@@ -0,0 +1,451 @@
+.. _datasource_vmware:
+
+VMware
+******
+
+This datasource is for use with systems running on a VMware platform such as
+vSphere and currently supports the following data transports:
+
+* `Guest OS Customization`_
+* `GuestInfo keys`_
+
+The configuration method is dependent upon the transport.
+
+Guest OS customisation
+======================
+
+The following configuration can be set for this datasource in ``cloud-init``
+configuration (in :file:`/etc/cloud/cloud.cfg` or
+:file:`/etc/cloud/cloud.cfg.d/`).
+
+System configuration
+--------------------
+
+* ``disable_vmware_customization``: true (disable) or false (enable) the VMware
+ traditional Linux guest customisation. Traditional Linux guest customisation
+ is customising a Linux virtual machine with a
+ `traditional Linux customisation specification`_. Setting this configuration
+ to false is required to make sure this datasource is found in ``ds-identify``
+ when using Guest OS customisation transport.
+
+ Default: true
+
+Datasource configuration
+------------------------
+
+* ``allow_raw_data``: true (enable) or false (disable) the VMware customisation
+ using ``cloud-init`` metadata and user data directly. Since vSphere 7.0
+ Update 3 version, users can create a Linux customisation specification with
+ minimal ``cloud-init`` metadata and user data, and apply this specification
+ to a virtual machine. This datasource will parse the metadata and user data
+ and configure the virtual machine with them. See
+ `Guest customisation using cloud-init`_ for more information.
+
+ Default: true
+
+* ``vmware_cust_file_max_wait``: The maximum amount of clock time (in seconds)
+ that should be spent waiting for VMware customisation files.
+
+ Default: 15
+
+Configuration examples
+----------------------
+
+1. Create the :file:`/etc/cloud/cloud.cfg.d/99-vmware-guest-customization.cfg`
+ file with the following content, which will enable VMware customisation and
+ set the maximum waiting time for the VMware customisation file to 10
+ seconds:
+
+ .. code-block:: yaml
+
+ disable_vmware_customization: false
+ datasource:
+ VMware:
+ vmware_cust_file_max_wait: 10
+
+2. Create the :file:`/etc/cloud/cloud.cfg.d/99-vmware-guest-customization.cfg`
+ file with the following content, which will enable VMware customisation but
+ will only try to apply a traditional Linux Guest Customisation
+ configuration, and set the maximum waiting time for the VMware
+ customisation file to 10 seconds:
+
+ .. code-block:: yaml
+
+ disable_vmware_customization: false
+ datasource:
+ VMware:
+ allow_raw_data: false
+ vmware_cust_file_max_wait: 10
+
+VMware Tools configuration
+--------------------------
+
+`VMware Tools`_ is required for this datasource's configuration settings, as
+well as vCloud and vSphere admin configuration. Users can change the VMware
+Tools configuration options with the following command:
+
+.. code-block:: shell
+
+ vmware-toolbox-cmd config set <section> <key> <value>
+
+The following VMware Tools configuration option affects this datasource's
+behaviour when applying customisation configuration with custom scripts:
+
+* ``[deploypkg] enable-custom-scripts``: If this option is absent in VMware
+ Tools configuration, the custom script is disabled by default for security
+  reasons. Some VMware products may change this default behaviour (for
+  example, enabling it by default) through their customisation specification
+  settings.
+
+ VMware admins can refer to `customization configuration`_ and set the
+ customisation specification settings.
+
+For more information, see `VMware vSphere Product Documentation`_ and specific
+VMware Tools configuration options.
+
+GuestInfo keys
+==============
+
+One method of providing meta, user, and vendor data is by setting the following
+key/value pairs on a VM's ``extraConfig`` `property`_:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Property
+ - Description
+ * - ``guestinfo.metadata``
+ - A YAML or JSON document containing the ``cloud-init`` metadata.
+ * - ``guestinfo.metadata.encoding``
+ - The encoding type for ``guestinfo.metadata``.
+ * - ``guestinfo.userdata``
+ - A YAML document containing the ``cloud-init`` user data.
+ * - ``guestinfo.userdata.encoding``
+ - The encoding type for ``guestinfo.userdata``.
+ * - ``guestinfo.vendordata``
+ - A YAML document containing the ``cloud-init`` vendor data.
+ * - ``guestinfo.vendordata.encoding``
+ - The encoding type for ``guestinfo.vendordata``.
+
+
+All ``guestinfo.*.encoding`` values may be set to ``base64`` or
+``gzip+base64``.
+
+Features
+========
+
+This section reviews several features available in this datasource.
+
+Instance data and lazy networks
+-------------------------------
+
+One of the hallmarks of ``cloud-init`` is
+:ref:`its use of instance-data and JINJA queries <instancedata-Using>` -- the
+ability to write queries in user and vendor data that reference runtime
+information present in :file:`/run/cloud-init/instance-data.json`. This works
+well when the metadata provides all of the information up front, such as the
+network configuration. For systems that rely on DHCP, however, this
+information may not be available when the metadata is persisted to disk.
+
+This datasource ensures that even if the instance is using DHCP to configure
+networking, the same details about the configured network are available in
+:file:`/run/cloud-init/instance-data.json` as if static networking was used.
+This information collected at runtime is easy to demonstrate by executing the
+datasource on the command line. From the root of this repository, run the
+following command:
+
+.. code-block:: bash
+
+ PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py
+
+The above command will result in output similar to the below JSON:
+
+.. code-block:: json
+
+ {
+ "hostname": "akutz.localhost",
+ "local-hostname": "akutz.localhost",
+ "local-ipv4": "192.168.0.188",
+ "local_hostname": "akutz.localhost",
+ "network": {
+ "config": {
+ "dhcp": true
+ },
+ "interfaces": {
+ "by-ipv4": {
+ "172.0.0.2": {
+ "netmask": "255.255.255.255",
+ "peer": "172.0.0.2"
+ },
+ "192.168.0.188": {
+ "broadcast": "192.168.0.255",
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "255.255.255.0"
+ }
+ },
+ "by-ipv6": {
+ "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": {
+ "flags": 208,
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ },
+ "by-mac": {
+ "64:4b:f0:18:9a:21": {
+ "ipv4": [
+ {
+ "addr": "192.168.0.188",
+ "broadcast": "192.168.0.255",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "ipv6": [
+ {
+ "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2",
+ "flags": 208,
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ ]
+ },
+ "ac:de:48:00:11:22": {
+ "ipv6": []
+ }
+ }
+ }
+ },
+ "wait-on-network": {
+ "ipv4": true,
+ "ipv6": "false"
+ }
+ }
+
+
+Redacting sensitive information (GuestInfo keys transport only)
+---------------------------------------------------------------
+
+Sometimes the ``cloud-init`` user data might contain sensitive information,
+and it may be desirable to have the ``guestinfo.userdata`` key (or other
+``guestinfo`` keys) redacted as soon as its data is read by the datasource.
+This is possible by adding the following to the metadata:
+
+.. code-block:: yaml
+
+ redact: # formerly named cleanup-guestinfo, which will also work
+ - userdata
+ - vendordata
+
+When the above snippet is added to the metadata, the datasource will iterate
+over the elements in the ``redact`` array and clear each of the keys. For
+example, when the ``guestinfo`` transport is used, the above snippet will cause
+the following commands to be executed:
+
+.. code-block:: shell
+
+ vmware-rpctool "info-set guestinfo.userdata ---"
+ vmware-rpctool "info-set guestinfo.userdata.encoding "
+ vmware-rpctool "info-set guestinfo.vendordata ---"
+ vmware-rpctool "info-set guestinfo.vendordata.encoding "
+
+Please note that keys are set to the valid YAML string ``---`` as it is not
+possible to remove an existing key from the ``guestinfo`` key-space. A key's
+analogous encoding property will be set to a single white-space character,
+causing the datasource to treat the actual key value as plain-text, thereby
+loading it as an empty YAML doc (hence the aforementioned ``---``\ ).
+
+Reading the local IP addresses
+------------------------------
+
+This datasource automatically discovers the local IPv4 and IPv6 addresses for
+a guest operating system based on the default routes. However, when inspecting
+a VM externally, it's not possible to know what the *default* IP address is for
+the guest OS. That's why this datasource sets the discovered, local IPv4 and
+IPv6 addresses back in the ``guestinfo`` namespace as the following keys:
+
+* ``guestinfo.local-ipv4``
+* ``guestinfo.local-ipv6``
+
+It is possible that a host may not have any default, local IP addresses. It's
+also possible the reported, local addresses are link-local addresses. But these
+two keys may be used to discover what this datasource determined were the local
+IPv4 and IPv6 addresses for a host.
+
+Waiting on the network
+----------------------
+
+Sometimes ``cloud-init`` may bring up the network, but it will not finish
+coming online before the datasource's ``setup`` function is called, resulting
+in a :file:`/var/run/cloud-init/instance-data.json` file that does not have the
+correct network information. It is possible to instruct the datasource to wait
+until an IPv4 or IPv6 address is available before writing the instance data
+with the following metadata properties:
+
+.. code-block:: yaml
+
+ wait-on-network:
+ ipv4: true
+ ipv6: true
+
+If either of the above values are true, then the datasource will sleep for a
+second, check the network status, and repeat until one or both addresses from
+the specified families are available.
+
+Walkthrough of GuestInfo keys transport
+=======================================
+
+The following series of steps is a demonstration of how to configure a VM with
+this datasource using the GuestInfo keys transport:
+
+#. Create the metadata file for the VM. Save the following YAML to a file named
+ :file:`metadata.yaml`\:
+
+ .. code-block:: yaml
+
+ instance-id: cloud-vm
+ local-hostname: cloud-vm
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+
+#. Create the userdata file :file:`userdata.yaml`\:
+
+ .. code-block:: yaml
+
+ #cloud-config
+
+ users:
+ - default
+ - name: akutz
+ primary_group: akutz
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: sudo, wheel
+ lock_passwd: true
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com
+
+#. Please note this step requires that the VM be powered off. All of the
+ commands below use the VMware CLI tool, `govc`_.
+
+ Go ahead and assign the path to the VM to the environment variable ``VM``\:
+
+ .. code-block:: shell
+
+ export VM="/inventory/path/to/the/vm"
+
+#. Power off the VM:
+
+ .. raw:: html
+
+ <hr />
+
+ &#x26a0;&#xfe0f; <strong>First Boot Mode</strong>
+
+ To ensure the next power-on operation results in a first-boot scenario for
+ ``cloud-init``, it may be necessary to run the following command just before
+ powering off the VM:
+
+ .. code-block:: bash
+
+ cloud-init clean --logs --machine-id
+
+ Otherwise ``cloud-init`` may not run in first-boot mode. For more
+ information on how the boot mode is determined, please see the
+ :ref:`First Boot Documentation <boot-First_boot_determination>`.
+
+ .. raw:: html
+
+ <hr />
+
+ .. code-block:: shell
+
+ govc vm.power -off "${VM}"
+
+#. Export the environment variables that contain the ``cloud-init`` metadata
+ and user data:
+
+ .. code-block:: shell
+
+ export METADATA=$(gzip -c9 <metadata.yaml | { base64 -w0 2>/dev/null || base64; }) \
+ USERDATA=$(gzip -c9 <userdata.yaml | { base64 -w0 2>/dev/null || base64; })
+
+#. Assign the metadata and user data to the VM:
+
+ .. code-block:: shell
+
+ govc vm.change -vm "${VM}" \
+ -e guestinfo.metadata="${METADATA}" \
+ -e guestinfo.metadata.encoding="gzip+base64" \
+ -e guestinfo.userdata="${USERDATA}" \
+ -e guestinfo.userdata.encoding="gzip+base64"
+
+ .. note::
+ Please note the above commands include specifying the encoding for the
+ properties. This is important as it informs the datasource how to decode
+ the data for ``cloud-init``. Valid values for ``metadata.encoding`` and
+ ``userdata.encoding`` include:
+
+ * ``base64``
+ * ``gzip+base64``
+
+#. Power on the VM:
+
+ .. code-block:: shell
+
+ govc vm.power -on "${VM}"
+
+If all went according to plan, the VM is now:
+
+* Locked down, allowing SSH access only for the user in the user data.
+* Configured for a dynamic IP address via DHCP.
+* Configured with the hostname ``cloud-vm``.
+
+Examples of common configurations
+=================================
+
+Setting the hostname
+--------------------
+
+The hostname is set by way of the metadata key ``local-hostname``.
+
+Setting the instance ID
+-----------------------
+
+The instance ID may be set by way of the metadata key ``instance-id``. However,
+if this value is absent then the instance ID is read from the file
+:file:`/sys/class/dmi/id/product_uuid`.
+
+Providing public SSH keys
+-------------------------
+
+The public SSH keys may be set by way of the metadata key ``public-keys-data``.
+Each newline-terminated string will be interpreted as a separate SSH public
+key, which will be placed in distro's default user's
+:file:`~/.ssh/authorized_keys`. If the value is empty or absent, then nothing
+will be written to :file:`~/.ssh/authorized_keys`.
+
+Configuring the network
+-----------------------
+
+The network is configured by setting the metadata key ``network`` with a value
+consistent with Network Config :ref:`Version 1 <network_config_v1>` or
+:ref:`Version 2 <network_config_v2>`, depending on the Linux distro's version
+of ``cloud-init``.
+
+The metadata key ``network.encoding`` may be used to indicate the format of
+the metadata key ``network``. Valid encodings are ``base64`` and
+``gzip+base64``.
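+
+Putting the keys above together, a minimal sketch of a metadata document that
+sets the hostname, the instance ID and a public SSH key might look like the
+following (the key value below is only a placeholder):
+
+.. code-block:: yaml
+
+   instance-id: cloud-vm
+   local-hostname: cloud-vm
+   # newline-terminated SSH public keys for the default user
+   public-keys-data: |
+     ssh-ed25519 AAAA... user@example.com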
+
+
+.. LINKS
+.. _Guest OS Customization: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-vm-administration/GUID-58E346FF-83AE-42B8-BE58-253641D257BC.html
+.. _GuestInfo keys: https://github.com/vmware/govmomi/blob/master/govc/USAGE.md
+.. _traditional Linux customisation specification: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-vm-administration/GUID-EB5F090E-723C-4470-B640-50B35D1EC016.html#GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39__GUID-40C60643-A2EB-4B05-8927-B51AF7A6CC5E
+.. _Guest customisation using cloud-init: https://developer.vmware.com/docs/17020/vsphere-web-services-sdk-programming-guide--8-0-/GUID-75E27FA9-2E40-4CBF-BF3D-22DCFC8F11F7.html
+.. _VMware Tools: https://docs.vmware.com/en/VMware-Tools/index.html
+.. _customization configuration: https://github.com/canonical/cloud-init/blob/main/cloudinit/sources/helpers/vmware/imc/config.py
+.. _VMware vSphere Product Documentation: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-vm-administration/GUID-EB5F090E-723C-4470-B640-50B35D1EC016.html#GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39__GUID-40C60643-A2EB-4B05-8927-B51AF7A6CC5E
+.. _property: https://vdc-repo.vmware.com/vmwb-repository/dcr-public/723e7f8b-4f21-448b-a830-5f22fd931b01/5a8257bd-7f41-4423-9a73-03307535bd42/doc/vim.vm.ConfigInfo.html
+.. _govc: https://github.com/vmware/govmomi/blob/master/govc
+
diff --git a/doc/rtd/reference/datasources/vultr.rst b/doc/rtd/reference/datasources/vultr.rst
new file mode 100644
index 00000000..1115c29e
--- /dev/null
+++ b/doc/rtd/reference/datasources/vultr.rst
@@ -0,0 +1,32 @@
+.. _datasource_vultr:
+
+Vultr
+*****
+
+The `Vultr`_ datasource retrieves basic configuration values from the locally
+accessible metadata service. All data is served over HTTP from the address
+``169.254.169.254``. The endpoints are documented in the
+`metadata service documentation`_.
+
+Configuration
+=============
+
+Vultr's datasource can be configured as follows: ::
+
+ datasource:
+ Vultr:
+ url: 'http://169.254.169.254'
+ retries: 3
+ timeout: 2
+ wait: 2
+
+* ``url``: The URL used to acquire the metadata configuration.
+* ``retries``: Determines the number of times to attempt to connect to the
+ metadata service.
+* ``timeout``: Determines the timeout (in seconds) to wait for a response from
+ the metadata service.
+* ``wait``: Determines the time (in seconds) to wait before retrying after a
+  failed attempt to reach the metadata service.
+
+.. _Vultr: https://www.vultr.com/
+.. _metadata service documentation: https://www.vultr.com/metadata/
diff --git a/doc/rtd/reference/datasources/zstack.rst b/doc/rtd/reference/datasources/zstack.rst
new file mode 100644
index 00000000..e1fefd21
--- /dev/null
+++ b/doc/rtd/reference/datasources/zstack.rst
@@ -0,0 +1,36 @@
+.. _datasource_zstack:
+
+ZStack
+******
+
+The ZStack platform provides an AWS EC2-compatible metadata service, but with
+a different datasource identity. More information about ZStack can be found
+at `ZStack`_.
+
+Discovery
+=========
+
+To determine whether a VM is running on the ZStack platform, ``cloud-init``
+checks DMI information via ``dmidecode -s chassis-asset-tag``. If the output
+ends with ``.zstack.io``, it's running on the ZStack platform.
+
+Metadata
+--------
+
+The same way as with EC2, instance metadata can be queried at: ::
+
+ GET http://169.254.169.254/2009-04-04/meta-data/
+ instance-id
+ local-hostname
+
+User data
+---------
+
+The same way as with EC2, instance user data can be queried at: ::
+
+ GET http://169.254.169.254/2009-04-04/user-data/
+ meta_data.json
+ user_data
+ password
+
+.. _ZStack: https://www.zstack.io
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/reference/examples.rst
index 3f260947..c9829e49 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/reference/examples.rst
@@ -1,6 +1,5 @@
.. _yaml_examples:
-*********************
Cloud config examples
*********************
@@ -11,7 +10,6 @@ Including users and groups
:language: yaml
:linenos:
-
Writing out arbitrary files
===========================
@@ -19,7 +17,6 @@ Writing out arbitrary files
:language: yaml
:linenos:
-
Adding a yum repository
=======================
@@ -27,8 +24,8 @@ Adding a yum repository
:language: yaml
:linenos:
-Configure an instances trusted CA certificates
-==============================================
+Configure an instance's trusted CA certificates
+===============================================
.. literalinclude:: ../../examples/cloud-config-ca-certs.txt
:language: yaml
@@ -48,14 +45,14 @@ Install and run `ansible-pull`
:language: yaml
:linenos:
-Configure Instance to be Managed by Ansible
+Configure instance to be managed by Ansible
===========================================
.. literalinclude:: ../../examples/cloud-config-ansible-managed.txt
:language: yaml
:linenos:
-Configure Instance to be An Ansible Controller
+Configure instance to be an Ansible controller
==============================================
.. literalinclude:: ../../examples/cloud-config-ansible-controller.txt
@@ -108,8 +105,8 @@ Adjust mount points mounted
:language: yaml
:linenos:
-Configure instances SSH keys
-============================
+Configure instance's SSH keys
+=============================
.. literalinclude:: ../../examples/cloud-config-ssh-keys.txt
:language: yaml
diff --git a/doc/rtd/reference/faq.rst b/doc/rtd/reference/faq.rst
new file mode 100644
index 00000000..87aade59
--- /dev/null
+++ b/doc/rtd/reference/faq.rst
@@ -0,0 +1,303 @@
+.. _faq:
+
+FAQ
+***
+
+How do I get help?
+==================
+
+Having trouble? We would like to help!
+
+- First go through this page with answers to common questions
+- Use the search bar at the upper left to search our documentation
+- Ask questions in the ``#cloud-init`` `IRC channel on Libera`_
+- Join and ask questions on the ``cloud-init`` `mailing list`_
+- Find a bug? Check out the :ref:`reporting_bugs` topic to find out how to
+ report one
+
+Where are the logs?
+===================
+
+``Cloud-init`` writes logs to the following locations:
+
+- :file:`/var/log/cloud-init-output.log`: Captures the output from each stage
+ of ``cloud-init`` when it runs.
+- :file:`/var/log/cloud-init.log`: Very detailed log with debugging output,
+ detailing each action taken.
+- :file:`/run/cloud-init`: contains logs about how ``cloud-init`` decided to
+ enable or disable itself, as well as what platforms/datasources were
+ detected. These logs are most useful when trying to determine what
+ ``cloud-init`` did or did not run.
+
+Be aware that each time a system boots, new logs are appended to the files in
+:file:`/var/log`. Therefore, the files may have information present from more
+than one boot.
+
+When reviewing these logs, look for errors or Python tracebacks.
+
+Where are the configuration files?
+==================================
+
+``Cloud-init`` config is provided in two places:
+
+- :file:`/etc/cloud/cloud.cfg`
+- :file:`/etc/cloud/cloud.cfg.d/*.cfg`
+
+These files can define the modules that run during instance initialisation,
+the datasources to evaluate on boot, as well as other settings.
+
+See the :ref:`configuration sources explanation<configuration>` and
+:ref:`configuration reference<base_config_reference>` pages for more details.
+
+Where are the data files?
+=========================
+
+Inside the :file:`/var/lib/cloud/` directory there are two important
+subdirectories:
+
+:file:`instance`
+----------------
+
+The :file:`/var/lib/cloud/instance` directory is a symbolic link that points
+to the most recently used :file:`instance-id` directory. This folder contains
+the information ``cloud-init`` received from datasources, including vendor and
+user data. This can be helpful to review to ensure the correct data was passed.
+
+It also contains the :file:`datasource` file that contains the full information
+about which datasource was identified and used to set up the system.
+
+Finally, writing the :file:`boot-finished` file is the last thing that
+``cloud-init`` does.
+
+:file:`data`
+------------
+
+The :file:`/var/lib/cloud/data` directory contains information related to the
+previous boot:
+
+* :file:`instance-id`: ID of the instance as discovered by ``cloud-init``.
+ Changing this file has no effect.
+* :file:`result.json`: JSON file that will show both the datasource used to
+ set up the instance, and whether any errors occurred.
+* :file:`status.json`: JSON file showing the datasource used, a breakdown of
+ all four modules, whether any errors occurred, and the start and stop times.
+
+What datasource am I using?
+===========================
+
+To correctly set up an instance, ``cloud-init`` must correctly identify the
+cloud that it is on. Therefore, knowing which datasource is used on an
+instance launch can aid in debugging.
+
+To find out which datasource is being used run the :command:`cloud-id` command:
+
+.. code-block:: shell-session
+
+ $ cloud-id
+
+This will tell you which datasource is being used, for example:
+
+.. code-block::
+
+ nocloud
+
+If the ``cloud-id`` is not what is expected, then running the
+:file:`ds-identify` script in debug mode and providing that in a bug can aid
+in resolving any issues:
+
+.. code-block:: shell-session
+
+ $ sudo DEBUG_LEVEL=2 DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
+
+The ``--force`` parameter allows the command to be run again even though the
+instance has already launched. The other options increase the verbosity of
+logging and direct the log output to ``stderr``.
+
+How can I re-run datasource detection and ``cloud-init``?
+=========================================================
+
+If a user is developing a new datasource or working on debugging an issue it
+may be useful to re-run datasource detection and the initial setup of
+``cloud-init``.
+
+To do this, force :file:`ds-identify` to re-run, clean up any logs, and
+re-run ``cloud-init``:
+
+.. code-block:: shell-session
+
+ $ sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
+ $ sudo cloud-init clean --logs
+ $ sudo cloud-init init --local
+ $ sudo cloud-init init
+
+.. warning::
+
+ These commands will re-run ``cloud-init`` as if this were first boot of a
+ system: this will, at the very least, cycle SSH host keys and may do
+ substantially more. **Do not run these commands on production systems.**
+
+How can I debug my user data?
+=============================
+
+Two of the most common issues with cloud config user data are:
+
+1. Incorrectly formatted YAML
+2. First line does not contain ``#cloud-config``
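+
+For reference, a minimal, correctly formatted cloud-config document starts
+with ``#cloud-config`` on the first line and contains valid YAML, for example
+(the command shown here is only illustrative):
+
+.. code-block:: yaml
+
+   #cloud-config
+   runcmd:
+     # any shell command; runs once, late in boot
+     - echo 'hello world'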
+
+Static user data validation
+---------------------------
+
+To verify your cloud config is valid YAML you may use `validate-yaml.py`_.
+
+To ensure that the keys and values in your user data are correct, you may run:
+
+.. code-block:: shell-session
+
+ $ cloud-init schema --system --annotate
+
+or to test YAML in a file:
+
+.. code-block:: shell-session
+
+ $ cloud-init schema -c test.yml --annotate
+
+Log analysis
+------------
+
+If you can log into your system, the best way to debug your system is to
+check the contents of the log files :file:`/var/log/cloud-init.log` and
+:file:`/var/log/cloud-init-output.log` for warnings, errors, and
+tracebacks. Tracebacks are always reportable bugs.
+
+
+Why did ``cloud-init`` never complete?
+======================================
+
+To check if ``cloud-init`` is running still, run:
+
+.. code-block:: shell-session
+
+ $ cloud-init status
+
+To wait for ``cloud-init`` to complete, run:
+
+.. code-block:: shell-session
+
+ $ cloud-init status --wait
+
+There are a number of reasons that ``cloud-init`` might never complete. This
+list is not exhaustive, but attempts to enumerate potential causes:
+
+External reasons
+----------------
+
+- Failed dependent services in the boot.
+- Bugs in the kernel or drivers.
+- Bugs in external userspace tools that are called by ``cloud-init``.
+
+Internal reasons
+----------------
+
+- A command in ``bootcmd`` or ``runcmd`` that never completes (e.g., running
+  :command:`cloud-init status --wait` will wait forever on itself and never
+  complete; see the sketch after this list).
+- Non-standard configurations that disable timeouts or set extremely high
+ values ("never" is used in a loose sense here).
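+
+As a sketch of the first case above, user data like the following will never
+complete, because the command waits on ``cloud-init`` itself:
+
+.. code-block:: yaml
+
+   #cloud-config
+   runcmd:
+     # deadlocks: waits on cloud-init, which is waiting on runcmd
+     - cloud-init status --wait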
+
+Failing to complete on ``systemd``
+----------------------------------
+
+``Cloud-init`` consists of multiple services on ``systemd``. If a service
+that ``cloud-init`` depends on stalls, ``cloud-init`` will not continue.
+If reporting a bug related to ``cloud-init`` failing to complete on
+``systemd``, please make sure to include the output of the following commands.
+
+.. code-block:: shell-session
+
+ $ systemd-analyze critical-chain cloud-init.target
+ $ journalctl --boot=-1
+ $ systemctl --failed
+
+``autoinstall``, ``preruncmd``, ``postruncmd``
+==============================================
+
+Since ``cloud-init`` ignores unknown top-level user data ``cloud-config``
+keys, other projects such as `Juju`_ and `Subiquity autoinstaller`_ use a
+YAML-formatted config that combines ``cloud-init``'s user data cloud-config
+YAML format with their own custom YAML keys. Because unused top-level keys
+are ignored, these combined YAML configurations may be valid ``cloud-config``
+files; however, keys such as ``autoinstall``, ``preruncmd``, and
+``postruncmd`` are not used by ``cloud-init`` to configure anything.
+
+Please direct bugs and questions about other projects that use ``cloud-init``
+to their respective support channels. For Subiquity autoinstaller that is via
+IRC (``#ubuntu-server`` on Libera) or Discourse. For Juju support see their
+`discourse page`_.
+
+Where can I learn more?
+=======================
+
+Below are some videos, blog posts, and white papers about ``cloud-init`` from a
+variety of sources.
+
+Videos:
+
+- `cloud-init - The Good Parts`_
+- `Perfect Proxmox Template with Cloud Image and Cloud Init`_
+ [proxmox, cloud-init, template]
+- `cloud-init - Building clouds one Linux box at a time (Video)`_
+- `Metadata and cloud-init`_
+- `Introduction to cloud-init`_
+
+Blog Posts:
+
+- `cloud-init - The cross-cloud Magic Sauce (PDF)`_
+- `cloud-init - Building clouds one Linux box at a time (PDF)`_
+- `The beauty of cloud-init`_
+- `Cloud-init Getting Started`_ [fedora, libvirt, cloud-init]
+- `Build Azure Devops Agents With Linux cloud-init for Dotnet Development`_
+ [terraform, azure, devops, docker, dotnet, cloud-init]
+- `Setup Neovim cloud-init Completion`_
+ [neovim, yaml, Language Server Protocol, jsonschema, cloud-init]
+
+Events:
+
+- `cloud-init Summit 2019`_
+- `cloud-init Summit 2018`_
+- `cloud-init Summit 2017`_
+
+
+Whitepapers:
+
+- `Utilising cloud-init on Microsoft Azure (Whitepaper)`_
+- `Cloud Instance Initialization with cloud-init (Whitepaper)`_
+
+.. _mailing list: https://launchpad.net/~cloud-init
+.. _IRC channel on Libera: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
+.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/main/tools/validate-yaml.py
+.. _Juju: https://ubuntu.com/blog/topics/juju
+.. _discourse page: https://discourse.charmhub.io
+
+.. _cloud-init - The Good Parts: https://www.youtube.com/watch?v=2_m6EUo6VOI
+.. _Utilising cloud-init on Microsoft Azure (Whitepaper): https://ubuntu.com/engage/azure-cloud-init-whitepaper
+.. _Cloud Instance Initialization with cloud-init (Whitepaper): https://ubuntu.com/blog/cloud-instance-initialisation-with-cloud-init
+
+.. _cloud-init - The cross-cloud Magic Sauce (PDF): https://events.linuxfoundation.org/wp-content/uploads/2017/12/cloud-init-The-cross-cloud-Magic-Sauce-Scott-Moser-Chad-Smith-Canonical.pdf
+.. _cloud-init - Building clouds one Linux box at a time (Video): https://www.youtube.com/watch?v=1joQfUZQcPg
+.. _cloud-init - Building clouds one Linux box at a time (PDF): https://web.archive.org/web/20181111020605/https://annex.debconf.org/debconf-share/debconf17/slides/164-cloud-init_Building_clouds_one_Linux_box_at_a_time.pdf
+.. _Metadata and cloud-init: https://www.youtube.com/watch?v=RHVhIWifVqU
+.. _The beauty of cloud-init: https://web.archive.org/web/20180830161317/http://brandon.fuller.name/archives/2011/05/02/06.40.57/
+.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY
+.. _Build Azure Devops Agents With Linux cloud-init for Dotnet Development: https://codingsoul.org/2022/04/25/build-azure-devops-agents-with-linux-cloud-init-for-dotnet-development/
+.. _Perfect Proxmox Template with Cloud Image and Cloud Init: https://www.youtube.com/watch?v=shiIi38cJe4
+.. _Cloud-init Getting Started: https://blog.while-true-do.io/cloud-init-getting-started/
+.. _Setup Neovim cloud-init Completion: https://phoenix-labs.xyz/blog/setup-neovim-cloud-init-completion/
+
+.. _cloud-init Summit 2019: https://powersj.io/post/cloud-init-summit19/
+.. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/
+.. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/
+.. _Subiquity autoinstaller: https://ubuntu.com/server/docs/install/autoinstall
diff --git a/doc/rtd/reference/index.rst b/doc/rtd/reference/index.rst
new file mode 100644
index 00000000..5610490b
--- /dev/null
+++ b/doc/rtd/reference/index.rst
@@ -0,0 +1,21 @@
+Reference
+*********
+
+Our reference section contains support information for ``cloud-init``.
+This includes details on the network requirements, API definitions, support
+matrices and so on.
+
+-----
+
+.. toctree::
+ :maxdepth: 1
+
+ modules.rst
+ examples.rst
+ cli.rst
+ availability.rst
+ faq.rst
+ merging.rst
+ datasources.rst
+ network-config.rst
+ base_config_reference.rst
diff --git a/doc/rtd/reference/merging.rst b/doc/rtd/reference/merging.rst
new file mode 100644
index 00000000..62efffdb
--- /dev/null
+++ b/doc/rtd/reference/merging.rst
@@ -0,0 +1,292 @@
+.. _merging_user_data:
+
+Merging user data sections
+**************************
+
+The ability to merge user data sections is a feature that was implemented by
+popular request. It was identified that there should be a way to specify how
+cloud-config YAML "dictionaries" provided as user data are handled when there
+are multiple YAML files to be merged together (e.g., when performing an
+#include).
+
+The previous merging algorithm was very simple and would only overwrite
+(and not append). So, it was decided to create a new and improved way to merge
+dictionaries (and their contained objects) together in a customisable way,
+thus allowing users who provide cloud-config user data to determine exactly
+how their objects will be merged.
+
+For example:
+
+.. code-block:: yaml
+
+ #cloud-config (1)
+ runcmd:
+ - bash1
+ - bash2
+
+ #cloud-config (2)
+ runcmd:
+ - bash3
+ - bash4
+
+The previous way of merging the two objects above would result in a final
+cloud-config object that contains the following:
+
+.. code-block:: yaml
+
+ #cloud-config (merged)
+ runcmd:
+ - bash3
+ - bash4
+
+Typically this is not what users want - instead they would prefer:
+
+.. code-block:: yaml
+
+ #cloud-config (merged)
+ runcmd:
+ - bash1
+ - bash2
+ - bash3
+ - bash4
+
+This change makes it easier to combine the various cloud-config objects you
+have into a more useful list. It also reduces the duplication that was needed
+to accomplish the same result with the previous method.
+
+Built-in mergers
+================
+
+``Cloud-init`` provides merging for the following built-in types:
+
+- :command:`Dict`
+- :command:`List`
+- :command:`String`
+
+``Dict``
+--------
+
+The :command:`Dict` merger has the following options, which control what is
+done with values contained within the config.
+
+- :command:`allow_delete`: Existing values not present in the new value can be
+ deleted. Defaults to ``False``.
+- :command:`no_replace`: Do not replace an existing value if one is already
+ present. Enabled by default.
+- :command:`replace`: Overwrite existing values with new ones.
+
+``List``
+--------
+
+The :command:`List` merger has the following options, which control what is
+done with the values contained within the config.
+
+- :command:`append`: Add new value to the end of the list. Defaults to
+ ``False``.
+- :command:`prepend`: Add new values to the start of the list. Defaults to
+ ``False``.
+- :command:`no_replace`: Do not replace an existing value if one is already
+ present. Enabled by default.
+- :command:`replace`: Overwrite existing values with new ones.
+
+``String``
+----------
+
+The :command:`Str` merger has the following options, which control what is
+done with the values contained within the config.
+
+- :command:`append`: Add new value to the end of the string. Defaults to
+  ``False``.
+
+Common options
+--------------
+
+These are the common options for all merge types, which control how recursive
+merging is done on other types.
+
+- :command:`recurse_dict`: If ``True``, merge the new values of the
+ dictionary. Defaults to ``True``.
+- :command:`recurse_list`: If ``True``, merge the new values of the list.
+ Defaults to ``False``.
+- :command:`recurse_array`: Alias for ``recurse_list``.
+- :command:`recurse_str`: If ``True``, merge the new values of the string.
+  Defaults to ``False``.
+
+Customisation
+=============
+
+Because the above merging algorithm may not always be desired (just as the
+previous merging algorithm was not always the preferred one), the concept of
+customised merging was introduced through `merge classes`.
+
+A `merge class` is a class definition providing functions that can be used
+to merge a given type with another given type.
+
+An example of one of these `merging classes` is the following:
+
+.. code-block:: python
+
+ class Merger:
+ def __init__(self, merger, opts):
+ self._merger = merger
+ self._overwrite = 'overwrite' in opts
+
+ # This merging algorithm will attempt to merge with
+ # another dictionary, on encountering any other type of object
+ # it will not merge with said object, but will instead return
+ # the original value
+ #
+ # On encountering a dictionary, it will create a new dictionary
+ # composed of the original and the one to merge with, if 'overwrite'
+ # is enabled then keys that exist in the original will be overwritten
+ # by keys in the one to merge with (and associated values). Otherwise
+ # if not in overwrite mode the 2 conflicting keys themselves will
+ # be merged.
+ def _on_dict(self, value, merge_with):
+ if not isinstance(merge_with, (dict)):
+ return value
+ merged = dict(value)
+ for (k, v) in merge_with.items():
+ if k in merged:
+ if not self._overwrite:
+ merged[k] = self._merger.merge(merged[k], v)
+ else:
+ merged[k] = v
+ else:
+ merged[k] = v
+ return merged
+
+As you can see, there is an ``_on_dict`` method here that will be given a
+source value, and a value to merge with. The result will be the merged object.
+
+This code itself is called by another merging class which "directs" the
+merging to happen by analysing the object types to merge, and attempting to
+find a known object that will merge that type. An example of this can be found
+in the :file:`mergers/__init__.py` file (see ``LookupMerger`` and
+``UnknownMerger``).
+
+So, following the typical ``cloud-init`` approach of allowing source code to
+be downloaded and used dynamically, it is possible for users to inject their
+own merging files to handle specific types of merging as they choose (the
+basic ones included will handle lists, dicts, and strings). Note how each
+merge can have options associated with it, which affect how the merging is
+performed. For example, a dictionary merger can be told to overwrite instead
+of attempting to merge, or a string merger can be told to append strings
+instead of discarding other strings to merge with.
+
+How to activate
+===============
+
+There are a few ways to activate the merging algorithms, and to customise them
+for your own usage.
+
+1. The first way involves the usage of MIME messages in ``cloud-init`` to
+ specify multi-part documents (this is one way in which multiple
+ cloud-config can be joined together into a single cloud-config). Two new
+ headers are looked for, both of which can define the way merging is done
+ (the first header to exist "wins"). These new headers (in lookup order) are
+ ``'Merge-Type'`` and ``'X-Merge-Type'``. The value should be a string which
+ will satisfy the new merging format definition (see below for this format).
+
+2. The second way is to specify the `merge type` in the body of the
+ cloud-config dictionary. There are two ways to specify this; either as a
+ string, or as a dictionary (see format below). The keys that are looked up
+ for this definition are the following (in order): ``'merge_how'``,
+ ``'merge_type'``.
+
+String format
+-------------
+
+The following string format is expected: ::
+
+ classname1(option1,option2)+classname2(option3,option4)....
+
+Each ``classname`` corresponds to the name of a merger class to look up, and
+the options given in parentheses are passed to that class when it is
+constructed.
+
+The following example shows the default string that gets used when none is
+otherwise provided: ::
+
+ list()+dict()+str()
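+
+As a sketch, this string can be supplied through the ``merge_how`` (or
+``merge_type``) key of a cloud-config document, for example:
+
+.. code-block:: yaml
+
+   #cloud-config
+   # append lists, recurse dicts into lists, keep default string handling
+   merge_how: 'list(append)+dict(no_replace,recurse_list)+str()'
+   runcmd:
+     - bash1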
+
+Dictionary format
+-----------------
+
+A dictionary can be used when it specifies the same information as the
+string format (i.e., the second option above). For example:
+
+.. code-block:: python
+
+ {'merge_how': [{'name': 'list', 'settings': ['append']},
+ {'name': 'dict', 'settings': ['no_replace', 'recurse_list']},
+ {'name': 'str', 'settings': ['append']}]}
+
+This would be the dictionary equivalent of the default string format.
+
+Specifying multiple types, and what this does
+=============================================
+
+Now you may be asking yourself: "What exactly happens if I specify a
+``merge-type`` header or dictionary for every cloud-config I provide?"
+
+The answer is that when merging, a stack of ``'merging classes'`` is kept. The
+first one in the stack is the default merging class. This set of mergers
+will be used when the first cloud-config is merged with the initial empty
+cloud-config dictionary. If the cloud-config that was just merged provided a
+set of merging classes (via the above formats) then those merging classes will
+be pushed onto the stack. Now if there is a second cloud-config to be merged,
+then the merging classes provided by the previously merged cloud-config (not
+the default) will be used, and so on. In this way a cloud-config can decide
+how it will merge with a cloud-config dictionary coming after it.
+
+Other uses
+==========
+
+In addition to being used for merging user data sections, the default merging
+algorithm for merging :file:`'conf.d'` YAML files (which form an initial YAML
+config for ``cloud-init``) was also changed to use this mechanism, to take
+advantage of the full benefits (and customisation) here as well. Other places
+that used the previous merging are also, similarly, now extensible (metadata
+merging, for example).
+
+Note, however, that merge algorithms are not used *across* configuration types.
+As was the case before merging was implemented, user data will overwrite
+:file:`'conf.d'` configuration without merging.
+
+Example cloud-config
+====================
+
+A common request is to include multiple ``runcmd`` directives in different
+files and merge all of the commands together. To achieve this, we must modify
+the default merging to allow for dictionaries to join list values.
+
+The first config:
+
+.. code-block:: yaml
+
+ #cloud-config
+ merge_how:
+ - name: list
+ settings: [append]
+ - name: dict
+ settings: [no_replace, recurse_list]
+
+ runcmd:
+ - bash1
+ - bash2
+
+The second config:
+
+.. code-block:: yaml
+
+ #cloud-config
+ merge_how:
+ - name: list
+ settings: [append]
+ - name: dict
+ settings: [no_replace, recurse_list]
+
+ runcmd:
+ - bash3
+ - bash4
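+
+With the ``list`` merger set to append and the ``dict`` merger recursing into
+lists, the two documents above merge into the result shown earlier in this
+document:
+
+.. code-block:: yaml
+
+   #cloud-config (merged)
+   runcmd:
+     - bash1
+     - bash2
+     - bash3
+     - bash4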
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/reference/modules.rst
index b0ad83e4..e727f59b 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/reference/modules.rst
@@ -1,9 +1,23 @@
.. _modules:
-
-Module Reference
+Module reference
****************
+Deprecation schedule and versions
+---------------------------------
+Keys may be documented as ``deprecated``, ``new``, or ``changed``.
+This allows cloud-init to evolve as requirements change, and to adopt
+better practices without maintaining design decisions indefinitely.
+
+Keys that have been marked as deprecated or changed may be removed or
+changed 5 years from the date of deprecation. For example, a key that is
+deprecated in version ``22.1`` (which is the first release in 2022) is
+scheduled to be removed in ``27.1`` (first release in 2027). Use of
+deprecated keys may cause warnings in the logs. In the case that a
+key's expected value changes, the key will be marked ``changed`` with a
+date. The same 5 year timeline can be expected for changed keys.
+
+
.. automodule:: cloudinit.config.cc_ansible
.. automodule:: cloudinit.config.cc_apk_configure
.. automodule:: cloudinit.config.cc_apt_configure
@@ -13,6 +27,9 @@ Module Reference
.. automodule:: cloudinit.config.cc_ca_certs
.. automodule:: cloudinit.config.cc_chef
.. automodule:: cloudinit.config.cc_disable_ec2_metadata
+
+.. _mod-disk_setup:
+
.. automodule:: cloudinit.config.cc_disk_setup
.. automodule:: cloudinit.config.cc_fan
.. automodule:: cloudinit.config.cc_final_message
@@ -27,6 +44,9 @@ Module Reference
.. automodule:: cloudinit.config.cc_mcollective
.. automodule:: cloudinit.config.cc_migrator
.. automodule:: cloudinit.config.cc_mounts
+
+.. _mod-ntp:
+
.. automodule:: cloudinit.config.cc_ntp
.. automodule:: cloudinit.config.cc_package_update_upgrade_install
.. automodule:: cloudinit.config.cc_phone_home
@@ -36,7 +56,13 @@ Module Reference
.. automodule:: cloudinit.config.cc_resolv_conf
.. automodule:: cloudinit.config.cc_rh_subscription
.. automodule:: cloudinit.config.cc_rightscale_userdata
+
+.. _mod-rsyslog:
+
.. automodule:: cloudinit.config.cc_rsyslog
+
+.. _mod-runcmd:
+
.. automodule:: cloudinit.config.cc_runcmd
.. automodule:: cloudinit.config.cc_salt_minion
.. automodule:: cloudinit.config.cc_scripts_per_boot
@@ -46,6 +72,9 @@ Module Reference
.. automodule:: cloudinit.config.cc_scripts_vendor
.. automodule:: cloudinit.config.cc_seed_random
.. automodule:: cloudinit.config.cc_set_hostname
+
+.. _mod-set_passwords:
+
.. automodule:: cloudinit.config.cc_set_passwords
.. automodule:: cloudinit.config.cc_snap
.. automodule:: cloudinit.config.cc_spacewalk
@@ -54,13 +83,14 @@ Module Reference
.. automodule:: cloudinit.config.cc_ssh_import_id
.. automodule:: cloudinit.config.cc_timezone
.. automodule:: cloudinit.config.cc_ubuntu_advantage
-.. automodule:: cloudinit.config.cc_ubuntu_autoinstall
.. automodule:: cloudinit.config.cc_ubuntu_drivers
.. automodule:: cloudinit.config.cc_update_etc_hosts
.. automodule:: cloudinit.config.cc_update_hostname
+
+.. _mod-users_groups:
+
.. automodule:: cloudinit.config.cc_users_groups
.. automodule:: cloudinit.config.cc_wireguard
.. automodule:: cloudinit.config.cc_write_files
.. automodule:: cloudinit.config.cc_yum_add_repo
.. automodule:: cloudinit.config.cc_zypper_add_repo
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config-format-eni.rst b/doc/rtd/reference/network-config-format-eni.rst
index 94fa0f9e..f89fdb46 100644
--- a/doc/rtd/topics/network-config-format-eni.rst
+++ b/doc/rtd/reference/network-config-format-eni.rst
@@ -1,20 +1,21 @@
.. _network_config_eni:
-Network Configuration ENI (Legacy)
-----------------------------------
+Network configuration ENI (legacy)
+**********************************
-`Cloud-init`_ supports reading and writing network config in the ``ENI``
+``Cloud-init`` supports reading and writing network config in the ``ENI``
format which is consumed by the ``ifupdown`` tool to parse and apply network
configuration.
-As an input format this is **legacy**. In cases where ENI format is available
-and another format is also available, it will prefer to use the other format.
+As an input format this is **legacy**. In cases where ENI format is available
+and another format is also available, ``cloud-init`` will prefer to use the
+other, newer format.
+
This can happen in either :ref:`datasource_nocloud` or
:ref:`datasource_openstack` datasources.
Please reference existing `documentation`_ for the
-``/etc/network/interfaces(5)`` format.
+:file:`/etc/network/interfaces(5)` format.
.. _Cloud-init: https://launchpad.net/cloud-init
.. _documentation: http://manpages.ubuntu.com/manpages/trusty/en/man5/interfaces.5.html
-.. vi: textwidth=79
diff --git a/doc/rtd/reference/network-config-format-v1.rst b/doc/rtd/reference/network-config-format-v1.rst
new file mode 100644
index 00000000..0475df9d
--- /dev/null
+++ b/doc/rtd/reference/network-config-format-v1.rst
@@ -0,0 +1,647 @@
+.. _network_config_v1:
+
+Networking config Version 1
+***************************
+
+This network configuration format lets users customise their instance's
+networking interfaces by assigning subnet configuration, virtual device
+creation (bonds, bridges, VLANs), routes, and DNS configuration.
+
+Required elements of a `network config Version 1` are ``config`` and
+``version``.
+
+``Cloud-init`` will read this format from :ref:`base_config_reference`.
+
+For example, the following could be present in
+:file:`/etc/cloud/cloud.cfg.d/custom-networking.cfg`:
+
+.. code-block:: yaml
+
+ network:
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: dhcp
+
+The :ref:`datasource_nocloud` datasource can also provide ``cloud-init``
+networking configuration in this format.
+
+Configuration types
+===================
+
+Within the network ``config`` portion, users include a list of configuration
+types. The current list of supported ``type`` values is as follows:
+
+- ``physical``: Physical
+- ``bond``: Bond
+- ``bridge``: Bridge
+- ``vlan``: VLAN
+- ``nameserver``: Nameserver
+- ``route``: Route
+
+Physical, Bond, Bridge and VLAN types may also include IP configuration under
+the key ``subnets``.
+
+- ``subnets``: Subnet/IP
+
+Physical
+--------
+
+The ``physical`` type configuration represents a "physical" network device,
+typically Ethernet-based. At least one of these entries is required for
+external network connectivity. Type ``physical`` requires only one key:
+``name``. A ``physical`` device may contain some or all of the following
+keys:
+
+``name: <desired device name>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A device's name must be less than 15 characters. Names exceeding the maximum
+will be truncated. This is a limitation of the Linux kernel network-device
+structure.
+
+``mac_address: <MAC Address>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The MAC Address is a device unique identifier that most Ethernet-based network
+devices possess. Specifying a MAC Address is optional.
+Letters must be lowercase.
+
+.. note::
+ It is best practice to "quote" all MAC addresses, since an unquoted MAC
+ address might be incorrectly interpreted as an integer in `YAML`_.
+
+.. note::
+ ``Cloud-init`` will handle the persistent mapping between a device's
+ ``name`` and the ``mac_address``.
+
+``mtu: <MTU SizeBytes>``
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The MTU key represents a device's Maximum Transmission Unit, which is the
+largest size packet or frame, specified in octets (eight-bit bytes), that can
+be sent in a packet- or frame-based network. Specifying ``mtu`` is optional.
+
+.. note::
+   The possible supported values of a device's MTU are not available at
+   configuration time. It's possible to specify a value too large or too
+   small for a device, and it may be ignored by the device.
+
+Physical example
+^^^^^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ # Simple network adapter
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ # Second nic with Jumbo frames
+ - type: physical
+ name: jumbo0
+ mac_address: 'aa:11:22:33:44:55'
+ mtu: 9000
+ # 10G pair
+ - type: physical
+ name: gbe0
+ mac_address: 'cd:11:22:33:44:00'
+ - type: physical
+ name: gbe1
+ mac_address: 'cd:11:22:33:44:02'
+
+Bond
+----
+
+A ``bond`` type will configure a Linux software Bond with one or more network
+devices. A ``bond`` type requires the following keys:
+
+``name: <desired device name>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A device's name must be less than 15 characters. Names exceeding the maximum
+will be truncated. This is a limitation of the Linux kernel network-device
+structure.
+
+``mac_address: <MAC Address>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When specifying a MAC address on a bond, this value will be assigned to the
+bond device and may differ from the MAC address of any of the underlying
+bond interfaces. Specifying a MAC address is optional. If ``mac_address`` is
+not present, then the bond will use one of the MAC Address values from one of
+the bond interfaces.
+
+.. note::
+ It is best practice to "quote" all MAC addresses, since an unquoted MAC
+ address might be incorrectly interpreted as an integer in `YAML`_.
+
+``bond_interfaces: <List of network device names>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``bond_interfaces`` key accepts a list of network device ``name`` values
+from the configuration. This list may be empty.
+
+``mtu: <MTU SizeBytes>``
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
+.. note::
+   The possible supported values of a device's MTU are not available at
+   configuration time. It's possible to specify a value too large or too
+   small for a device, and it may be ignored by the device.
+
+``params: <Dictionary of key: value bonding parameter pairs>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The ``params`` key in a bond holds a dictionary of bonding parameters.
+This dictionary may be empty. For more details on what the various bonding
+parameters mean please read the Linux Kernel :file:`Bonding.txt`.
+
+Valid ``params`` keys are:
+
+ - ``active_slave``: Set bond attribute
+ - ``ad_actor_key``: Set bond attribute
+ - ``ad_actor_sys_prio``: Set bond attribute
+ - ``ad_actor_system``: Set bond attribute
+ - ``ad_aggregator``: Set bond attribute
+ - ``ad_num_ports``: Set bond attribute
+ - ``ad_partner_key``: Set bond attribute
+ - ``ad_partner_mac``: Set bond attribute
+ - ``ad_select``: Set bond attribute
+ - ``ad_user_port_key``: Set bond attribute
+ - ``all_slaves_active``: Set bond attribute
+ - ``arp_all_targets``: Set bond attribute
+ - ``arp_interval``: Set bond attribute
+ - ``arp_ip_target``: Set bond attribute
+ - ``arp_validate``: Set bond attribute
+ - ``downdelay``: Set bond attribute
+ - ``fail_over_mac``: Set bond attribute
+ - ``lacp_rate``: Set bond attribute
+ - ``lp_interval``: Set bond attribute
+ - ``miimon``: Set bond attribute
+ - ``mii_status``: Set bond attribute
+ - ``min_links``: Set bond attribute
+ - ``mode``: Set bond attribute
+ - ``num_grat_arp``: Set bond attribute
+ - ``num_unsol_na``: Set bond attribute
+ - ``packets_per_slave``: Set bond attribute
+ - ``primary``: Set bond attribute
+ - ``primary_reselect``: Set bond attribute
+ - ``queue_id``: Set bond attribute
+ - ``resend_igmp``: Set bond attribute
+ - ``slaves``: Set bond attribute
+ - ``tlb_dynamic_lb``: Set bond attribute
+ - ``updelay``: Set bond attribute
+ - ``use_carrier``: Set bond attribute
+ - ``xmit_hash_policy``: Set bond attribute
+
+Bond example
+^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ # Simple network adapter
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ # 10G pair
+ - type: physical
+ name: gbe0
+ mac_address: 'cd:11:22:33:44:00'
+ - type: physical
+ name: gbe1
+ mac_address: 'cd:11:22:33:44:02'
+ - type: bond
+ name: bond0
+ bond_interfaces:
+ - gbe0
+ - gbe1
+ params:
+ bond-mode: active-backup
+
+Bridge
+------
+
+Type ``bridge`` requires the following keys:
+
+- ``name``: Set the name of the bridge.
+- ``bridge_interfaces``: Specify the ports of a bridge via their ``name``.
+ This list may be empty.
+- ``params``: A list of bridge params. For more details, please read the
+ ``bridge-utils-interfaces`` manpage.
+
+Valid keys are:
+
+ - ``bridge_ageing``: Set the bridge's ageing value.
+ - ``bridge_bridgeprio``: Set the bridge device network priority.
+ - ``bridge_fd``: Set the bridge's forward delay.
+ - ``bridge_hello``: Set the bridge's hello value.
+ - ``bridge_hw``: Set the bridge's MAC address.
+ - ``bridge_maxage``: Set the bridge's maxage value.
+ - ``bridge_maxwait``: Set how long network scripts should wait for the
+ bridge to be up.
+ - ``bridge_pathcost``: Set the cost of a specific port on the bridge.
+ - ``bridge_portprio``: Set the priority of a specific port on the bridge.
+ - ``bridge_ports``: List of devices that are part of the bridge.
+ - ``bridge_stp``: Set spanning tree protocol on or off.
+ - ``bridge_waitport``: Set amount of time in seconds to wait on specific
+ ports to become available.
+
+Bridge example
+^^^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ # Simple network adapter
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ # Second nic with Jumbo frames
+ - type: physical
+ name: jumbo0
+ mac_address: 'aa:11:22:33:44:55'
+ mtu: 9000
+ - type: bridge
+ name: br0
+ bridge_interfaces:
+ - jumbo0
+ params:
+ bridge_ageing: 250
+ bridge_bridgeprio: 22
+ bridge_fd: 1
+ bridge_hello: 1
+ bridge_maxage: 10
+ bridge_maxwait: 0
+ bridge_pathcost:
+ - jumbo0 75
+ bridge_portprio:
+ - jumbo0 28
+ bridge_stp: 'off'
+ bridge_waitport:
+ - jumbo0 0
+
+VLAN
+----
+
+Type ``vlan`` requires the following keys:
+
+- ``name``: Set the name of the VLAN
+- ``vlan_link``: Specify the underlying link via its ``name``.
+- ``vlan_id``: Specify the VLAN numeric id.
+
+The following optional keys are supported:
+
+``mtu: <MTU SizeBytes>``
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
+.. note::
+   The possible supported values of a device's MTU are not available at
+   configuration time. It's possible to specify a value too large or too
+   small for a device, and it may be ignored by the device.
+
+VLAN example
+^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ # Physical interfaces.
+ - type: physical
+ name: eth0
+ mac_address: 'c0:d6:9f:2c:e8:80'
+ # VLAN interface.
+ - type: vlan
+ name: eth0.101
+ vlan_link: eth0
+ vlan_id: 101
+ mtu: 1500
+
+Nameserver
+----------
+
+Users can specify a ``nameserver`` type. Nameserver dictionaries include
+the following keys:
+
+- ``address``: List of IPv4 or IPv6 addresses of nameservers.
+- ``search``: List of hostnames to include in the :file:`resolv.conf` search
+ path.
+- ``interface``: Optional. Ties the nameserver definition to the specified
+ interface. The value specified here must match the ``name`` of an interface
+ defined in this config. If unspecified, this nameserver will be considered
+ a global nameserver.
+
+Nameserver example
+^^^^^^^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ - type: nameserver
+ interface: interface0 # Ties nameserver to interface0 only
+ address:
+ - 192.168.23.2
+ - 8.8.8.8
+ search:
+ - exemplary
+
+Route
+-----
+
+Users can include static routing information as well. A ``route`` dictionary
+has the following keys:
+
+- ``destination``: IPv4 network address with CIDR netmask notation.
+- ``gateway``: IPv4 gateway address with CIDR netmask notation.
+- ``metric``: Integer which sets the network metric value for this route.
+
+Route example
+^^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: static
+ address: 192.168.23.14/24
+ gateway: 192.168.23.1
+ - type: route
+ destination: 192.168.24.0/24
+ gateway: 192.168.24.1
+ metric: 3
+
+Subnet/IP
+---------
+
+For any network device (one of the "config types"), users can define a list
+of ``subnets`` which contain IP configuration dictionaries. Multiple subnet
+entries will create interface aliases, allowing a single interface to use
+different IP configurations.
+
+Valid keys for ``subnets`` include the following:
+
+- ``type``: Specify the subnet type.
+- ``control``: Specify 'manual', 'auto' or 'hotplug'. Indicates how the
+ interface will be handled during boot.
+- ``address``: IPv4 or IPv6 address. It may include CIDR netmask notation.
+- ``netmask``: IPv4 subnet mask in dotted format or CIDR notation.
+- ``gateway``: IPv4 address of the default gateway for this subnet.
+- ``dns_nameservers``: Specify a list of IPv4 DNS server IPs to end up in
+  :file:`resolv.conf`.
+- ``dns_search``: Specify a list of search paths to be included in
+ :file:`resolv.conf`.
+- ``routes``: Specify a list of routes for a given interface.
+
+Subnet types are one of the following:
+
+- ``dhcp4``: Configure this interface with IPv4 DHCP.
+- ``dhcp``: Alias for ``dhcp4``.
+- ``dhcp6``: Configure this interface with IPv6 DHCP.
+- ``static``: Configure this interface with a static IPv4 address.
+- ``static6``: Configure this interface with a static IPv6 address.
+- ``ipv6_dhcpv6-stateful``: Configure this interface with ``dhcp6``.
+- ``ipv6_dhcpv6-stateless``: Configure this interface with SLAAC and DHCP.
+- ``ipv6_slaac``: Configure this interface's address with SLAAC.
+
+When making use of ``dhcp`` or either of the ``ipv6_dhcpv6`` types,
+no additional configuration is needed in the subnet dictionary.
+
+Using ``ipv6_dhcpv6-stateless`` or ``ipv6_slaac`` allows the IPv6 address to be
+automatically configured with StateLess Address AutoConfiguration (`SLAAC`_).
+SLAAC requires support from the network, so verify that your cloud or network
+offering has support before trying it out. With ``ipv6_dhcpv6-stateless``,
+DHCPv6 is still used to fetch other subnet details such as gateway or DNS
+servers. If you only want to discover the address, use ``ipv6_slaac``.
+
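+For illustration, the following minimal sketch (the interface name and MAC
+address are placeholders) requests an IPv6 address via SLAAC only:
+
+.. code-block::
+
+   network:
+     version: 1
+     config:
+       - type: physical
+         name: interface0
+         mac_address: '00:11:22:33:44:55'
+         subnets:
+           - type: ipv6_slaac
+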
+Subnet DHCP example
+^^^^^^^^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: dhcp
+
+Subnet static example
+^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ dns_nameservers:
+ - 192.168.23.2
+ - 8.8.8.8
+ dns_search:
+ - exemplary.maas
+
+Multiple subnet example
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The following will result in an ``interface0`` using DHCP and ``interface0:1``
+using the static subnet configuration:
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: dhcp
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ dns_nameservers:
+ - 192.168.23.2
+ - 8.8.8.8
+ dns_search:
+ - exemplary
+
+Subnet with routes example
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: dhcp
+ - type: static
+ address: 10.184.225.122
+ netmask: 255.255.255.252
+ routes:
+ - gateway: 10.184.225.121
+ netmask: 255.240.0.0
+ network: 10.176.0.0
+ - gateway: 10.184.225.121
+ netmask: 255.240.0.0
+ network: 10.208.0.0
+
+
+Multi-layered configurations
+============================
+
+Complex networking sometimes uses layers of configuration. The syntax allows
+users to build those layers one at a time. All of the supported virtual
+network devices allow specifying an underlying device by its ``name`` value.
+
+Bonded VLAN example
+-------------------
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ # 10G pair
+ - type: physical
+ name: gbe0
+ mac_address: 'cd:11:22:33:44:00'
+ - type: physical
+ name: gbe1
+ mac_address: 'cd:11:22:33:44:02'
+ # Bond.
+ - type: bond
+ name: bond0
+ bond_interfaces:
+ - gbe0
+ - gbe1
+ params:
+ bond-mode: 802.3ad
+ bond-lacp-rate: fast
+ # A Bond VLAN.
+ - type: vlan
+ name: bond0.200
+ vlan_link: bond0
+ vlan_id: 200
+ subnets:
+ - type: dhcp4
+
+Multiple VLAN example
+---------------------
+
+.. code-block::
+
+ network:
+ version: 1
+ config:
+ - id: eth0
+ mac_address: 'd4:be:d9:a8:49:13'
+ mtu: 1500
+ name: eth0
+ subnets:
+ - address: 10.245.168.16/21
+ dns_nameservers:
+ - 10.245.168.2
+ gateway: 10.245.168.1
+ type: static
+ type: physical
+ - id: eth1
+ mac_address: 'd4:be:d9:a8:49:15'
+ mtu: 1500
+ name: eth1
+ subnets:
+ - address: 10.245.188.2/24
+ dns_nameservers: []
+ type: static
+ type: physical
+ - id: eth1.2667
+ mtu: 1500
+ name: eth1.2667
+ subnets:
+ - address: 10.245.184.2/24
+ dns_nameservers: []
+ type: static
+ type: vlan
+ vlan_id: 2667
+ vlan_link: eth1
+ - id: eth1.2668
+ mtu: 1500
+ name: eth1.2668
+ subnets:
+ - address: 10.245.185.1/24
+ dns_nameservers: []
+ type: static
+ type: vlan
+ vlan_id: 2668
+ vlan_link: eth1
+ - id: eth1.2669
+ mtu: 1500
+ name: eth1.2669
+ subnets:
+ - address: 10.245.186.1/24
+ dns_nameservers: []
+ type: static
+ type: vlan
+ vlan_id: 2669
+ vlan_link: eth1
+ - id: eth1.2670
+ mtu: 1500
+ name: eth1.2670
+ subnets:
+ - address: 10.245.187.2/24
+ dns_nameservers: []
+ type: static
+ type: vlan
+ vlan_id: 2670
+ vlan_link: eth1
+ - address: 10.245.168.2
+ search:
+ - dellstack
+ type: nameserver
+
+.. _SLAAC: https://tools.ietf.org/html/rfc4862
+
+.. _YAML: https://yaml.org/type/int.html
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/reference/network-config-format-v2.rst
index 53274417..7469524b 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/reference/network-config-format-v2.rst
@@ -1,21 +1,22 @@
.. _network_config_v2:
-Networking Config Version 2
-===========================
+Networking config Version 2
+***************************
-Cloud-init's support for Version 2 network config is a subset of the
-version 2 format defined for the `netplan`_ tool. Cloud-init supports
-both reading and writing of Version 2; the latter support requires a
-distro with `netplan`_ present.
+``Cloud-init``'s support for Version 2 network config is a subset of the
+Version 2 format defined for the `Netplan`_ tool. ``Cloud-init`` supports
+both reading and writing of Version 2. Writing support requires a
+distro with Netplan present.
-.. _Netplan Passthrough:
+.. _Netplan_passthrough:
-Netplan Passthrough
--------------------
+Netplan passthrough
+===================
-On a system with netplan present, cloud-init will pass Version 2 configuration
-through to netplan without modification. On such systems, you do not need to
-limit yourself to the below subset of netplan's configuration format.
+On a system with Netplan present, ``cloud-init`` will pass Version 2
+configuration through to Netplan without modification. On such systems, you do
+not need to limit yourself to the below subset of Netplan's configuration
+format.
.. warning::
If you are writing or generating network configuration that may be used on
@@ -23,39 +24,37 @@ limit yourself to the below subset of netplan's configuration format.
this document, or you will see network configuration failures on
non-netplan systems.
-Version 2 Configuration Format
-------------------------------
+Version 2 configuration format
+==============================
-The ``network`` key has at least two required elements. First
-it must include ``version: 2`` and one or more of possible device
-``types``.
+The ``network`` key has at least two required elements. First, it must include
+``version: 2`` and one or more of possible device ``types``.
-Cloud-init will read this format from :ref:`base_config_reference`.
+``Cloud-init`` will read this format from :ref:`base_config_reference`.
For example the following could be present in
-``/etc/cloud/cloud.cfg.d/custom-networking.cfg``::
+:file:`/etc/cloud/cloud.cfg.d/custom-networking.cfg`: ::
network:
version: 2
ethernets: []
It may also be provided in other locations including the
-:ref:`datasource_nocloud`, see :ref:`default_behavior` for other places.
+:ref:`datasource_nocloud`. See :ref:`network_config` for other places.
Supported device ``types`` values are as follows:
-- Ethernets (``ethernets``)
-- Bonds (``bonds``)
-- Bridges (``bridges``)
-- VLANs (``vlans``)
-
-Each type block contains device definitions as a map where the keys (called
-"configuration IDs"). Each entry under the ``types`` may include IP and/or
-device configuration.
+- ``ethernets``: Ethernets
+- ``bonds``: Bonds
+- ``bridges``: Bridges
+- ``vlans``: VLANs
+Each ``type`` block contains device definitions as a map (where the keys are
+called "configuration IDs"). Each entry under the ``types`` may include IP
+and/or device configuration.
Device configuration IDs
-------------------------
+========================
The key names below the per-device-type definition maps (like ``ethernets:``)
are called "ID"s. They must be unique throughout the entire set of
@@ -66,132 +65,142 @@ currently being defined.
There are two physically/structurally different classes of device definitions,
and the ID field has a different interpretation for each:
-Physical devices (Examples: ethernet, wifi):
- These can dynamically come and go between
- reboots and even during runtime (hotplugging). In the generic case, they
- can be selected by ``match:`` rules on desired properties, such as
- name/name pattern, MAC address, driver, or device paths. In general these
- will match any number of devices (unless they refer to properties which are
- unique such as the full path or MAC address), so without further knowledge
- about the hardware these will always be considered as a group.
-
- It is valid to specify no match rules at all, in which case the ID field is
- simply the interface name to be matched. This is mostly useful if you want
- to keep simple cases simple, and it's how network device configuration has
- been done for a long time.
-
- If there are ``match``: rules, then the ID field is a purely opaque name
- which is only being used for references from definitions of compound
- devices in the config.
-
-Virtual devices (Examples: veth, bridge, bond):
- These are fully under the control of the
- config file(s) and the network stack. I. e. these devices are being created
- instead of matched. Thus ``match:`` and ``set-name:`` are not applicable for
- these, and the ID field is the name of the created virtual device.
+Physical devices (e.g., ethernet, wifi)
+---------------------------------------
+
+These can dynamically come and go between reboots and even during runtime
+(hotplugging). In the generic case, they can be selected by ``match:``
+rules on desired properties, such as name/name pattern, MAC address,
+driver, or device paths. In general these will match any number of
+devices (unless they refer to properties which are unique such as the full
+path or MAC address), so without further knowledge about the hardware,
+these will always be considered as a group.
+
+It is valid to specify no match rules at all, in which case the ID field is
+simply the interface name to be matched. This is mostly useful if you want
+to keep simple cases simple, and it's how network device configuration has
+been done for a long time.
+
+If there are ``match:`` rules, then the ID field is a purely opaque name
+which is only being used for references from definitions of compound
+devices in the config.
+
+Virtual devices (e.g., veth, bridge, bond)
+------------------------------------------
+
+These are fully under the control of the config file(s) and the network
+stack, i.e., these devices are being created instead of matched. Thus
+``match:`` and ``set-name:`` are not applicable for these, and the ID field
+is the name of the created virtual device.
Common properties for physical device types
--------------------------------------------
+===========================================
-**match**: *<(mapping)>*
+``match: <(mapping)>``
+----------------------
This selects a subset of available physical devices by various hardware
properties. The following configuration will then apply to all matching
devices, as soon as they appear. *All* specified properties must match.
The following properties for creating matches are supported:
-**name**: *<(scalar)>*
+``name: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^
Current interface name. Globs are supported, and the primary use case
for matching on names, as selecting one fixed name can be more easily
achieved with having no ``match:`` at all and just using the ID (see
-above). Note that currently only networkd supports globbing,
-NetworkManager does not.
+above). Note that currently only ``networkd`` supports globbing,
+``NetworkManager`` does not.
-**macaddress**: *<(scalar)>*
+Example: ::
-Device's MAC address in the form xx:xx:xx:xx:xx:xx. Globs are not allowed.
-Letters must be lowercase.
+ # all cards on second PCI bus
+ match:
+ name: enp2*
-.. note::
+``macaddress: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
- MAC addresses must be strings. As MAC addresses which consist of only the
- digits 0-9 (i.e. no hex a-f) can be interpreted as a base 60 integer per
- the `YAML 1.1 spec`_ it is best practice to quote all MAC addresses to ensure
- they are parsed as strings regardless of value.
+Device's MAC address in the form xx:xx:xx:xx:xx:xx. Globs are not allowed.
+Letters must be lowercase.
-.. _YAML 1.1 spec: https://yaml.org/type/int.html
+Example: ::
-**driver**: *<(scalar)>*
+ # fixed MAC address
+ match:
+ macaddress: "11:22:33:aa:bb:ff"
-Kernel driver name, corresponding to the ``DRIVER`` udev property. Globs are
-supported. Matching on driver is *only* supported with networkd.
+.. note::
+ It is best practice to "quote" all MAC addresses, since an unquoted MAC
+ address might be incorrectly interpreted as an integer in `YAML`_.
-**Examples**::
+``driver: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^
- # all cards on second PCI bus
- match:
- name: enp2*
+Kernel driver name, corresponding to the ``DRIVER`` udev property. Globs are
+supported. Matching on driver is *only* supported with ``networkd``.
- # fixed MAC address
- match:
- macaddress: 11:22:33:aa:bb:ff
+Example: ::
# first card of driver ``ixgbe``
match:
driver: ixgbe
name: en*s0
-**set-name**: *<(scalar)>*
+``set-name: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^
When matching on unique properties such as path or MAC, or with additional
-assumptions such as "there will only ever be one wifi device",
-match rules can be written so that they only match one device. Then this
-property can be used to give that device a more specific/desirable/nicer
-name than the default from udev’s ifnames. Any additional device that
-satisfies the match rules will then fail to get renamed and keep the
-original kernel name (and dmesg will show an error).
+assumptions such as "there will only ever be one wifi device", match rules
+can be written so that they only match one device. Then this property can be
+used to give that device a more specific/desirable/nicer name than the default
+from udev’s ``ifnames``. Any additional device that satisfies the match rules
+will then fail to get renamed and keep the original kernel name (and dmesg
+will show an error).
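+
+Example (an illustrative sketch; the ID, MAC address and new name are
+placeholders): ::
+
+  ethernets:
+    eth_lan0:
+      match:
+        macaddress: "00:11:22:33:44:55"
+      set-name: lan0
+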
-**wakeonlan**: *<(bool)>*
+``wakeonlan: <(bool)>``
+^^^^^^^^^^^^^^^^^^^^^^^
Enable wake on LAN. Off by default.
-
Common properties for all device types
---------------------------------------
+======================================
-**renderer**: *<(scalar)>*
+``renderer: <(scalar)>``
+------------------------
Use the given networking backend for this definition. Currently supported are
``networkd`` and ``NetworkManager``. This property can be specified globally
-in ``networks:``, for a device type (in e. g. ``ethernets:``) or
-for a particular device definition. Default is ``networkd``.
+in ``networks:``, for a device type (e.g., in ``ethernets:``) or for a
+particular device definition. Default is ``networkd``.
.. note::
+   ``Cloud-init`` only supports the ``networkd`` backend when rendering
+   ``version2`` config to the instance.
- Cloud-init only supports networkd backend if rendering version2 config
- to the instance.
-
-**dhcp4**: *<(bool)>*
+``dhcp4: <(bool)>``
+^^^^^^^^^^^^^^^^^^^
Enable DHCP for IPv4. Off by default.
-**dhcp6**: *<(bool)>*
+``dhcp6: <(bool)>``
+^^^^^^^^^^^^^^^^^^^
Enable DHCP for IPv6. Off by default.
-**dhcp4-overrides** and **dhcp6-overrides**: *<(mapping)>*
+``dhcp4-overrides and dhcp6-overrides: <(mapping)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-DHCP behavior overrides. Overrides will only have an effect if
-the corresponding DHCP type is enabled. Refer to `netplan#dhcp-overrides`_
+DHCP behaviour overrides. Overrides will only have an effect if
+the corresponding DHCP type is enabled. Refer to `Netplan#dhcp-overrides`_
for more documentation.
.. note::
+ These properties are only consumed on ``netplan`` and ``networkd``
+ renderers.
- These properties are only consumed on ``netplan`` and ``networkd``
- renderers.
-
-The ``netplan`` renderer :ref:`passes through <Netplan Passthrough>`
+The ``netplan`` renderer :ref:`passes through <Netplan_passthrough>`
everything and the ``networkd`` renderer consumes the following sub-properties:
* ``hostname`` *
@@ -205,7 +214,6 @@ everything and the ``networkd`` renderer consumes the following sub-properties:
* ``use-routes`` *
.. note::
-
Sub-properties marked with a ``*`` are unsupported for ``dhcp6-overrides``
when used with the ``networkd`` renderer.
@@ -222,31 +230,36 @@ Example: ::
use-ntp: false
use-routes: false
-**addresses**: *<(sequence of scalars)>*
+``addresses: <(sequence of scalars)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Add static addresses to the interface in addition to the ones received
-through DHCP or RA. Each sequence entry is in CIDR notation, i. e. of the
-form ``addr/prefixlen`` . ``addr`` is an IPv4 or IPv6 address as recognized
-by ``inet_pton``(3) and ``prefixlen`` the number of bits of the subnet.
+through DHCP or RA. Each sequence entry is in CIDR notation, i.e., of the
+form ``addr/prefixlen``. ``addr`` is an IPv4 or IPv6 address as recognised
+by ``inet_pton(3)`` and ``prefixlen`` the number of bits of the subnet.
Example: ``addresses: [192.168.14.2/24, 2001:1::1/64]``
-**gateway4**: or **gateway6**: *<(scalar)>*
+``gateway4: or gateway6: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Deprecated, see `Netplan#default-routes`_.
Set default gateway for IPv4/6, for manual address configuration. This
requires setting ``addresses`` too. Gateway IPs must be in a form
-recognized by ``inet_pton(3)``
+recognised by ``inet_pton(3)``
Example for IPv4: ``gateway4: 172.16.0.1``
Example for IPv6: ``gateway6: 2001:4::1``
-**mtu**: *<MTU SizeBytes>*
+``mtu: <MTU SizeBytes>``
+^^^^^^^^^^^^^^^^^^^^^^^^
The MTU key represents a device's Maximum Transmission Unit, the largest size
packet or frame, specified in octets (eight-bit bytes), that can be sent in a
-packet- or frame-based network. Specifying ``mtu`` is optional.
+packet- or frame-based network. Specifying ``mtu`` is optional.
-**nameservers**: *<(mapping)>*
+``nameservers: <(mapping)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set DNS servers and search domains, for manual address configuration. There
are two supported fields: ``addresses:`` is a list of IPv4 or IPv6 addresses
@@ -258,10 +271,11 @@ Example: ::
search: [lab, home]
addresses: [8.8.8.8, FEDC::1]
-**routes**: *<(sequence of mapping)>*
+``routes: <(sequence of mapping)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Add device specific routes. Each mapping includes a ``to``, ``via`` key
-with an IPv4 or IPv6 address as value. ``metric`` is an optional value.
+Add device-specific routes. Each mapping includes a ``to`` and a ``via`` key
+with an IPv4 or IPv6 address as its value. ``metric`` is an optional value.
Example: ::
@@ -271,14 +285,16 @@ Example: ::
metric: 3
Ethernets
-~~~~~~~~~
+---------
+
Ethernet device definitions do not support any specific properties beyond the
common ones described above.
Bonds
-~~~~~
+-----
-**interfaces** *<(sequence of scalars)>*
+``interfaces: <(sequence of scalars)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
All devices matching this ID list will be added to the bond.
@@ -292,60 +308,70 @@ Example: ::
bond0:
interfaces: [switchports]
-**parameters**: *<(mapping)>*
+``parameters: <(mapping)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Customization parameters for special bonding options. Time values are
+Customisation parameters for special bonding options. Time values are
specified in seconds unless otherwise specified.
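+
+Example (an illustrative sketch; the interface names are placeholders): ::
+
+  bonds:
+    bond0:
+      interfaces: [enp1s0, enp2s0]
+      parameters:
+        mode: active-backup
+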
-**mode**: *<(scalar)>*
+``mode: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^
Set the bonding mode used for the interfaces. The default is
``balance-rr`` (round robin). Possible values are ``balance-rr``,
``active-backup``, ``balance-xor``, ``broadcast``, ``802.3ad``,
``balance-tlb``, and ``balance-alb``.
-**lacp-rate**: *<(scalar)>*
+``lacp-rate: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^
Set the rate at which LACPDUs are transmitted. This is only useful
in 802.3ad mode. Possible values are ``slow`` (30 seconds, default),
and ``fast`` (every second).
-**mii-monitor-interval**: *<(scalar)>*
+``mii-monitor-interval: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specifies the interval for MII monitoring (verifying if an interface
of the bond has carrier). The default is ``0``; which disables MII
monitoring.
-**min-links**: *<(scalar)>*
+``min-links: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^
The minimum number of links up in a bond to consider the bond
interface to be up.
-**transmit-hash-policy**: <*(scalar)>*
+``transmit-hash-policy: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specifies the transmit hash policy for the selection of slaves. This
is only useful in balance-xor, 802.3ad and balance-tlb modes.
Possible values are ``layer2``, ``layer3+4``, ``layer2+3``,
``encap2+3``, and ``encap3+4``.
-**ad-select**: <*(scalar)>*
+``ad-select: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^
Set the aggregation selection mode. Possible values are ``stable``,
``bandwidth``, and ``count``. This option is only used in 802.3ad mode.
-**all-slaves-active**: <*(bool)>*
+``all-slaves-active: <(bool)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the bond should drop duplicate frames received on inactive ports,
set this option to ``false``. If they should be delivered, set this
option to ``true``. The default value is false, and is the desirable
-behavior in most situations.
+behaviour in most situations.
-**arp-interval**: <*(scalar)>*
+``arp-interval: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set the interval value for how frequently ARP link monitoring should
happen. The default value is ``0``, which disables ARP monitoring.
-**arp-ip-targets**: <*(sequence of scalars)>*
+``arp-ip-targets: <(sequence of scalars)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
IPs of other hosts on the link which should be sent ARP requests in
order to validate that a slave is up. This option is only used when
@@ -354,36 +380,42 @@ address must be given for ARP link monitoring to function. Only IPv4
addresses are supported. You can specify up to 16 IP addresses. The
default value is an empty list.
-**arp-validate**: <*(scalar)>*
+``arp-validate: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Configure how ARP replies are to be validated when using ARP link
monitoring. Possible values are ``none``, ``active``, ``backup``,
and ``all``.
-**arp-all-targets**: <*(scalar)>*
+``arp-all-targets: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specify whether to use any ARP IP target being up as sufficient for
a slave to be considered up; or if all the targets must be up. This
is only used for ``active-backup`` mode when ``arp-validate`` is
enabled. Possible values are ``any`` and ``all``.
-**up-delay**: <*(scalar)>*
+``up-delay: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^
Specify the delay before enabling a link once the link is physically
up. The default value is ``0``.
-**down-delay**: <*(scalar)>*
+``down-delay: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
Specify the delay before disabling a link once the link has been
lost. The default value is ``0``.
-**fail-over-mac-policy**: <*(scalar)>*
+``fail-over-mac-policy: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set whether to set all slaves to the same MAC address when adding
them to the bond, or how else the system should handle MAC addresses.
The possible values are ``none``, ``active``, and ``follow``.
-**gratuitous-arp**: <*(scalar)>*
+``gratuitous-arp: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specify how many ARP packets to send after failover. Once a link is
up on a new slave, a notification is sent and possibly repeated if
@@ -391,7 +423,8 @@ this value is set to a number greater than ``1``. The default value
is ``1`` and valid values are between ``1`` and ``255``. This only
affects ``active-backup`` mode.
-**packets-per-slave**: <*(scalar)>*
+``packets-per-slave: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In ``balance-rr`` mode, specifies the number of packets to transmit
on a slave before switching to the next. When this value is set to
@@ -399,25 +432,28 @@ on a slave before switching to the next. When this value is set to
``0`` and ``65535``. The default value is ``1``. This setting is
only used in ``balance-rr`` mode.
-**primary-reselect-policy**: <*(scalar)>*
+``primary-reselect-policy: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set the reselection policy for the primary slave. On failure of the
active slave, the system will use this policy to decide how the new
active slave will be chosen and how recovery will be handled. The
possible values are ``always``, ``better``, and ``failure``.
-**learn-packet-interval**: <*(scalar)>*
+``learn-packet-interval: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Specify the interval between sending learning packets to each slave.
+Specify the interval between sending Learning packets to each slave.
The value range is between ``1`` and ``0x7fffffff``. The default
value is ``1``. This option only affects ``balance-tlb`` and
``balance-alb`` modes.
Bridges
-~~~~~~~
+-------
-**interfaces**: <*(sequence of scalars)>*
+``interfaces: <(sequence of scalars)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
All devices matching this ID list will be added to the bridge.
@@ -431,66 +467,73 @@ Example: ::
br0:
interfaces: [switchports]
-**parameters**: <*(mapping)>*
+``parameters: <(mapping)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Customization parameters for special bridging options. Time values are
-specified in seconds unless otherwise specified.
+Customisation parameters for special bridging options. Time values are
+specified in seconds unless otherwise stated.
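+
+Example (an illustrative sketch; the interface names are placeholders): ::
+
+  bridges:
+    br0:
+      interfaces: [enp3s0]
+      parameters:
+        stp: true
+        forward-delay: 4
+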
-**ageing-time**: <*(scalar)>*
+``ageing-time: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Set the period of time to keep a MAC address in the forwarding
-database after a packet is received.
+Set the period of time to keep a MAC address in the forwarding database after
+a packet is received.
-**priority**: <*(scalar)>*
+``priority: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^
-Set the priority value for the bridge. This value should be an
-number between ``0`` and ``65535``. Lower values mean higher
-priority. The bridge with the higher priority will be elected as
-the root bridge.
+Set the priority value for the bridge. This value should be a number between
+``0`` and ``65535``. Lower values mean higher priority. The bridge with the
+higher priority will be elected as the root bridge.
-**forward-delay**: <*(scalar)>*
+``forward-delay: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specify the period of time the bridge will remain in Listening and
Learning states before getting to the Forwarding state. This value
-should be set in seconds for the systemd backend, and in milliseconds
-for the NetworkManager backend.
+should be set in seconds for the ``systemd`` backend, and in milliseconds
+for the ``NetworkManager`` backend.
-**hello-time**: <*(scalar)>*
+``hello-time: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
Specify the interval between two hello packets being sent out from
the root and designated bridges. Hello packets communicate
information about the network topology.
-**max-age**: <*(scalar)>*
+``max-age: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^
Set the maximum age of a hello packet. If the last hello packet is
older than that value, the bridge will attempt to become the root
bridge.
-**path-cost**: <*(scalar)>*
+``path-cost: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^^^^^^
Set the cost of a path on the bridge. Faster interfaces should have
a lower cost. This allows a finer control on the network topology
so that the fastest paths are available whenever possible.
-**stp**: <*(bool)>*
+``stp: <(bool)>``
+^^^^^^^^^^^^^^^^^
Define whether the bridge should use Spanning Tree Protocol. The
default value is "true", which means that Spanning Tree should be
used.
-
VLANs
-~~~~~
+-----
-**id**: <*(scalar)>*
+``id: <(scalar)>``
+^^^^^^^^^^^^^^^^^^
VLAN ID, a number between 0 and 4094.
-**link**: <*(scalar)>*
+``link: <(scalar)>``
+^^^^^^^^^^^^^^^^^^^^
-ID of the underlying device definition on which this VLAN gets
-created.
+ID of the underlying device definition on which this VLAN gets created.
Example: ::
@@ -508,9 +551,10 @@ Example: ::
Examples
---------
-Configure an ethernet device with networkd, identified by its name, and enable
-DHCP: ::
+========
+
+Configure an ethernet device with ``networkd``, identified by its name, and
+enable DHCP: ::
network:
version: 2
@@ -571,6 +615,7 @@ This is a complex example which shows most available features: ::
link: id0
dhcp4: yes
-.. _netplan: https://netplan.io
-.. _netplan#dhcp-overrides: https://netplan.io/reference#dhcp-overrides
-.. vi: textwidth=79
+.. _Netplan: https://netplan.io
+.. _YAML: https://yaml.org/type/int.html
+.. _Netplan#default-routes: https://netplan.io/reference#default-routes
+.. _Netplan#dhcp-overrides: https://netplan.io/reference#dhcp-overrides
diff --git a/doc/rtd/reference/network-config.rst b/doc/rtd/reference/network-config.rst
new file mode 100644
index 00000000..ea331f1c
--- /dev/null
+++ b/doc/rtd/reference/network-config.rst
@@ -0,0 +1,329 @@
+.. _network_config:
+
+Network configuration
+*********************
+
+Default behaviour
+=================
+
+``Cloud-init`` searches for network configuration in order of increasing
+precedence; each item overrides the previous one.
+
+- **Datasource**: For example, OpenStack may provide network config in the
+ MetaData Service.
+- **System config**: A ``network:`` entry in :file:`/etc/cloud/cloud.cfg.d/*`
+ configuration files.
+- **Kernel command line**: ``ip=`` or
+ ``network-config=<Base64 encoded YAML config string>``
+
+User data cannot change an instance's network configuration. In the absence
+of network configuration in any of the above sources, ``cloud-init`` will
+write out a network configuration that will issue a DHCP request on a "first"
+network interface.
+
+.. note::
+
+ The ``network-config`` value is expected to be a Base64 encoded YAML string
+ in :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally,
+ it can be compressed with ``gzip`` prior to Base64 encoding.
+
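+For illustration, assuming the configuration is stored in a file named
+:file:`network-config.yaml` (the name is arbitrary) and GNU coreutils are
+available, such a value could be generated as follows:
+
+.. code-block:: shell-session
+
+   $ base64 -w0 network-config.yaml            # plain Base64
+   $ gzip -c network-config.yaml | base64 -w0  # gzip, then Base64
+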
+Disabling network configuration
+===============================
+
+Users may disable ``cloud-init``'s network configuration capability and rely
+on other methods, such as embedded configuration or other customisations.
+
+``Cloud-init`` supports the following methods for disabling its network
+configuration:
+
+Kernel command line
+-------------------
+
+``Cloud-init`` will check for the parameter ``network-config=disabled``,
+which will automatically disable any network configuration.
+
+An example kernel command line entry to disable network configuration: ::
+
+ network-config=disabled
+
+Cloud config
+------------
+
+In the combined ``cloud-init`` configuration dictionary, merged from
+:file:`/etc/cloud/cloud.cfg` and :file:`/etc/cloud/cloud.cfg.d/*`: ::
+
+ network:
+ config: disabled
+
+If ``cloud-init``'s networking config has not been disabled, and no other
+network information is found, then it will proceed to generate a fallback
+networking configuration.
+
+Disabling network activation
+============================
+
+Some datasources may not be initialised until after the network has been
+brought up. In this case, ``cloud-init`` will attempt to bring up the
+interfaces specified by the datasource metadata using a network activator
+discovered by `cloudinit.net.activators.select_activators`_.
+
+This behaviour can be disabled in the ``cloud-init`` configuration dictionary,
+merged from :file:`/etc/cloud/cloud.cfg` and
+:file:`/etc/cloud/cloud.cfg.d/*`: ::
+
+ disable_network_activation: true
+
+Fallback network configuration
+==============================
+
+``Cloud-init`` will attempt to determine which of the attached network
+devices is most likely to have a connection, and then generate a network
+configuration to issue a DHCP request on that interface.
+
+``Cloud-init`` runs during early boot and does not expect composed network
+devices (such as Bridges) to be available. ``Cloud-init`` does not consider
+the following interface devices as likely "first" network interfaces for
+fallback configuration; they are filtered out from being selected.
+
+- **loopback**: ``name=lo``
+- **Virtual Ethernet**: ``name=veth*``
+- **Software Bridges**: ``type=bridge``
+- **Software VLANs**: ``type=vlan``
+
+``Cloud-init`` will prefer network interfaces that indicate they are connected
+via the Linux ``carrier`` flag being set. If no interfaces are marked as
+connected, then all unfiltered interfaces are potential connections.
+
+Of the potential interfaces, ``cloud-init`` will attempt to pick the "right"
+interface given the information it has available.
+
+Finally, after selecting the "right" interface, a configuration is generated
+and applied to the system.
+
+.. note::
+ PhotonOS disables fallback networking configuration by default, leaving
+ network unrendered when no other network config is provided.
+ If fallback config is still desired on PhotonOS, it can be enabled by
+ providing ``disable_fallback_netcfg: false`` in
+ :file:`/etc/cloud/cloud.cfg:sys_config` settings.
+
+Network configuration sources
+=============================
+
+``Cloud-init`` accepts a number of different network configuration formats in
+support of different cloud substrates. The datasource for each of these
+clouds will detect and consume its datasource-specific network configuration
+format, which is then used when writing the instance's network
+configuration.
+
+The following datasources optionally provide network configuration:
+
+- :ref:`datasource_config_drive`
+
+ - `OpenStack Metadata Service Network`_
+ - :ref:`network_config_eni`
+
+- :ref:`datasource_digital_ocean`
+
+ - `DigitalOcean JSON metadata`_
+
+- :ref:`datasource_lxd`
+
+ - `LXD`_
+
+- :ref:`datasource_nocloud`
+
+ - :ref:`network_config_v1`
+ - :ref:`network_config_v2`
+ - :ref:`network_config_eni`
+
+- :ref:`datasource_opennebula`
+
+ - :ref:`network_config_eni`
+
+- :ref:`datasource_openstack`
+
+ - :ref:`network_config_eni`
+ - `OpenStack Metadata Service Network`_
+
+- :ref:`datasource_smartos`
+
+ - `SmartOS JSON Metadata`_
+
+- :ref:`datasource_upcloud`
+
+ - `UpCloud JSON metadata`_
+
+- :ref:`datasource_vultr`
+
+ - `Vultr JSON metadata`_
+
+For more information on network configuration formats:
+
+.. toctree::
+ :maxdepth: 1
+
+ network-config-format-eni.rst
+ network-config-format-v1.rst
+ network-config-format-v2.rst
+
+
+Network configuration outputs
+=============================
+
+``Cloud-init`` converts various forms of user-supplied or automatically
+generated configuration into an internal network configuration state. From
+this state, ``cloud-init`` delegates rendering of the configuration to
+distro-supported formats. The following ``renderers`` are supported in
+``cloud-init``:
+
+NetworkManager
+--------------
+
+`NetworkManager`_ is the standard Linux network configuration tool suite. It
+supports a wide range of networking setups. Configuration is typically stored
+in :file:`/etc/NetworkManager`.
+
+It is the default for a number of Linux distributions, notably Fedora,
+CentOS/RHEL, and their derivatives.
+
+ENI
+---
+
+:file:`/etc/network/interfaces` or ``ENI`` is supported by the ``ifupdown``
+package found in Alpine Linux, Debian and Ubuntu.
+
+Netplan
+-------
+
+Introduced in Ubuntu 16.10 (Yakkety Yak), `Netplan`_ has been the default
+network configuration tool in Ubuntu since 17.10 (Artful Aardvark). Netplan
+consumes :ref:`network_config_v2` input and renders network configuration for
+supported backends such as ``systemd-networkd`` and ``NetworkManager``.
+
+Sysconfig
+---------
+
+Sysconfig format is used by RHEL, CentOS, Fedora and other derivatives.
+
+NetBSD, OpenBSD, FreeBSD
+------------------------
+
+Network renderers supporting BSD releases typically write configuration
+to :file:`/etc/rc.conf`. Unique to the BSD renderers is that each one also
+calls something akin to `FreeBSD.start_services`_, which will invoke the
+applicable network services to set up the network, making network activators
+unnecessary for BSD flavours at the moment.
+
+Network output policy
+=====================
+
+The default policy for selecting a network ``renderer`` (in order of
+preference) is as follows:
+
+- ENI
+- Sysconfig
+- Netplan
+- NetworkManager
+- FreeBSD
+- NetBSD
+- OpenBSD
+- Networkd
+
+The default policy for selecting a network ``activator`` (in order of
+preference) is as follows:
+
+- **ENI**: using ``ifup``, ``ifdown`` to manage device setup/teardown
+- **Netplan**: using ``netplan apply`` to manage device setup/teardown
+- **NetworkManager**: using ``nmcli`` to manage device setup/teardown
+- **Networkd**: using ``ip`` to manage device setup/teardown
+
+When applying the policy, ``cloud-init`` checks if the current instance has the
+correct binaries and paths to support the renderer. The first renderer that
+can be used is selected. Users may override the network renderer policy by
+supplying an updated configuration in cloud-config. ::
+
+ system_info:
+ network:
+ renderers: ['netplan', 'network-manager', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd']
+ activators: ['eni', 'netplan', 'network-manager', 'networkd']
+
+Network configuration tools
+===========================
+
+``Cloud-init`` contains a command used to test input/output conversion between
+formats. The :file:`tools/net-convert.py` script in the ``cloud-init`` source
+repository is helpful in examining expected output for a given input
+format. If running these commands from the ``cloud-init`` source directory,
+make sure to set the correct path with ``PYTHONPATH=.``.
+
+CLI Interface:
+
+.. code-block:: shell-session
+
+ $ cloud-init devel net-convert --help
+
+Example output:
+
+.. code-block::
+
+ usage: /usr/bin/cloud-init devel net-convert [-h] -p PATH -k {eni,network_data.json,yaml,azure-imds,vmware-imc} -d PATH -D
+ {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler}
+ [-m name,mac] [--debug] -O {eni,netplan,networkd,sysconfig,network-manager}
+
+ options:
+ -h, --help show this help message and exit
+ -p PATH, --network-data PATH
+ The network configuration to read
+ -k {eni,network_data.json,yaml,azure-imds,vmware-imc}, --kind {eni,network_data.json,yaml,azure-imds,vmware-imc}
+ The format of the given network config
+ -d PATH, --directory PATH
+ directory to place output in
+ -D {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler}, --distro {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler}
+ -m name,mac, --mac name,mac
+ interface name to mac mapping
+ --debug enable debug logging to stderr.
+ -O {eni,netplan,networkd,sysconfig,network-manager}, --output-kind {eni,netplan,networkd,sysconfig,network-manager}
+ The network config format to emit
+
+Example of converting V2 to sysconfig:
+
+.. code-block:: shell-session
+
+ $ cloud-init devel net-convert --network-data v2.yaml --kind yaml \
+ --output-kind sysconfig -d target
+ $ cat target/etc/sysconfig/network-scripts/ifcfg-eth*
+
+Example output:
+
+.. code-block::
+
+ # Created by cloud-init on instance boot automatically, do not edit.
+ #
+ BOOTPROTO=static
+ DEVICE=eth7
+ IPADDR=192.168.1.5/255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ # Created by cloud-init on instance boot automatically, do not edit.
+ #
+ BOOTPROTO=dhcp
+ DEVICE=eth9
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+
+
+.. _Cloud-init: https://launchpad.net/cloud-init
+.. _LXD: https://linuxcontainers.org/lxd/docs/master/cloud-init/#custom-network-configuration
+.. _NetworkManager: https://networkmanager.dev
+.. _Netplan: https://netplan.io/
+.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/
+.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
+.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
+.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
+.. _Vultr JSON metadata: https://www.vultr.com/metadata/
+.. _cloudinit.net.activators.select_activators: https://github.com/canonical/cloud-init/blob/main/cloudinit/net/activators.py#L279
+.. _FreeBSD.start_services: https://github.com/canonical/cloud-init/blob/main/cloudinit/net/freebsd.py#L28
diff --git a/doc/rtd/topics/analyze.rst b/doc/rtd/topics/analyze.rst
deleted file mode 100644
index 61213e28..00000000
--- a/doc/rtd/topics/analyze.rst
+++ /dev/null
@@ -1,316 +0,0 @@
-.. _analyze:
-
-Analyze
-*******
-
-The analyze subcommand was added to cloud-init in order to help analyze
-cloud-init boot time performance. It is loosely based on systemd-analyze where
-there are four subcommands:
-
-- blame
-- show
-- dump
-- boot
-
-Usage
-=====
-
-The analyze command requires one of the four subcommands:
-
-.. code-block:: shell-session
-
- $ cloud-init analyze blame
- $ cloud-init analyze show
- $ cloud-init analyze dump
- $ cloud-init analyze boot
-
-Availability
-============
-
-The analyze subcommand is generally available across all distributions with the
-exception of Gentoo and FreeBSD.
-
-Subcommands
-===========
-
-Blame
------
-
-The ``blame`` action matches ``systemd-analyze blame`` where it prints, in
-descending order, the units that took the longest to run. This output is
-highly useful for examining where cloud-init is spending its time during
-execution.
-
-.. code-block:: shell-session
-
- $ cloud-init analyze blame
- -- Boot Record 01 --
- 00.80300s (init-network/config-growpart)
- 00.64300s (init-network/config-resizefs)
- 00.62100s (init-network/config-ssh)
- 00.57300s (modules-config/config-grub-dpkg)
- 00.40300s (init-local/search-NoCloud)
- 00.38200s (init-network/config-users-groups)
- 00.19800s (modules-config/config-apt-configure)
- 00.03700s (modules-final/config-keys-to-console)
- 00.02100s (init-network/config-update_etc_hosts)
- 00.02100s (init-network/check-cache)
- 00.00800s (modules-final/config-ssh-authkey-fingerprints)
- 00.00800s (init-network/consume-vendor-data)
- 00.00600s (modules-config/config-timezone)
- 00.00500s (modules-final/config-final-message)
- 00.00400s (init-network/consume-user-data)
- 00.00400s (init-network/config-mounts)
- 00.00400s (init-network/config-disk_setup)
- 00.00400s (init-network/config-bootcmd)
- 00.00400s (init-network/activate-datasource)
- 00.00300s (init-network/config-update_hostname)
- 00.00300s (init-network/config-set_hostname)
- 00.00200s (modules-final/config-snappy)
- 00.00200s (init-network/config-rsyslog)
- 00.00200s (init-network/config-ca-certs)
- 00.00200s (init-local/check-cache)
- 00.00100s (modules-final/config-scripts-vendor)
- 00.00100s (modules-final/config-scripts-per-once)
- 00.00100s (modules-final/config-salt-minion)
- 00.00100s (modules-final/config-rightscale_userdata)
- 00.00100s (modules-final/config-phone-home)
- 00.00100s (modules-final/config-package-update-upgrade-install)
- 00.00100s (modules-final/config-fan)
- 00.00100s (modules-config/config-ubuntu-advantage)
- 00.00100s (modules-config/config-ssh-import-id)
- 00.00100s (modules-config/config-snap)
- 00.00100s (modules-config/config-set-passwords)
- 00.00100s (modules-config/config-runcmd)
- 00.00100s (modules-config/config-locale)
- 00.00100s (modules-config/config-byobu)
- 00.00100s (modules-config/config-apt-pipelining)
- 00.00100s (init-network/config-write-files)
- 00.00100s (init-network/config-seed_random)
- 00.00100s (init-network/config-migrator)
- 00.00000s (modules-final/config-ubuntu-drivers)
- 00.00000s (modules-final/config-scripts-user)
- 00.00000s (modules-final/config-scripts-per-instance)
- 00.00000s (modules-final/config-scripts-per-boot)
- 00.00000s (modules-final/config-puppet)
- 00.00000s (modules-final/config-power-state-change)
- 00.00000s (modules-final/config-mcollective)
- 00.00000s (modules-final/config-lxd)
- 00.00000s (modules-final/config-landscape)
- 00.00000s (modules-final/config-chef)
- 00.00000s (modules-config/config-snap_config)
- 00.00000s (modules-config/config-ntp)
- 00.00000s (modules-config/config-disable-ec2-metadata)
- 00.00000s (init-network/setup-datasource)
-
- 1 boot records analyzed
-
-Show
-----
-
-The ``show`` action is similar to ``systemd-analyze critical-chain`` which
-prints a list of units, the time they started and how long they took.
-Cloud-init has four stages and within each stage a number of modules may run
-depending on configuration. ``cloudinit-analyze show`` will, for each boot,
-print this information and a summary total time, per boot.
-
-The following is an abbreviated example of the show output:
-
-.. code-block:: shell-session
-
- $ cloud-init analyze show
- -- Boot Record 01 --
- The total time elapsed since completing an event is printed after the "@" character.
- The time the event takes is printed after the "+" character.
-
- Starting stage: init-local
- |``->no cache found @00.01700s +00.00200s
- |`->found local data from DataSourceNoCloud @00.11000s +00.40300s
- Finished stage: (init-local) 00.94200 seconds
-
- Starting stage: init-network
- |`->restored from cache with run check: DataSourceNoCloud [seed=/dev/sr0][dsmode=net] @04.79500s +00.02100s
- |`->setting up datasource @04.88900s +00.00000s
- |`->reading and applying user-data @04.90100s +00.00400s
- |`->reading and applying vendor-data @04.90500s +00.00800s
- |`->activating datasource @04.95200s +00.00400s
- Finished stage: (init-network) 02.72100 seconds
-
- Starting stage: modules-config
- |`->config-snap ran successfully @15.43100s +00.00100s
- ...
- |`->config-runcmd ran successfully @16.22300s +00.00100s
- |`->config-byobu ran successfully @16.23400s +00.00100s
- Finished stage: (modules-config) 00.83500 seconds
-
- Starting stage: modules-final
- |`->config-snappy ran successfully @16.87400s +00.00200s
- |`->config-package-update-upgrade-install ran successfully @16.87600s +00.00100s
- ...
- |`->config-final-message ran successfully @16.93700s +00.00500s
- |`->config-power-state-change ran successfully @16.94300s +00.00000s
- Finished stage: (modules-final) 00.10300 seconds
-
- Total Time: 4.60100 seconds
-
- 1 boot records analyzed
-
-If additional boot records are detected then they are printed out from oldest
-to newest.
-
-Dump
-----
-
-The ``dump`` action simply dumps the cloud-init logs that the analyze module
-is performing the analysis on and returns a list of dictionaries that can be
-consumed for other reporting needs. Each element in the list is a boot entry.
-
-.. code-block:: shell-session
-
- $ cloud-init analyze dump
- [
- {
- "description": "starting search for local datasources",
- "event_type": "start",
- "name": "init-local",
- "origin": "cloudinit",
- "timestamp": 1567057578.037
- },
- {
- "description": "attempting to read from cache [check]",
- "event_type": "start",
- "name": "init-local/check-cache",
- "origin": "cloudinit",
- "timestamp": 1567057578.054
- },
- {
- "description": "no cache found",
- "event_type": "finish",
- "name": "init-local/check-cache",
- "origin": "cloudinit",
- "result": "SUCCESS",
- "timestamp": 1567057578.056
- },
- {
- "description": "searching for local data from DataSourceNoCloud",
- "event_type": "start",
- "name": "init-local/search-NoCloud",
- "origin": "cloudinit",
- "timestamp": 1567057578.147
- },
- {
- "description": "found local data from DataSourceNoCloud",
- "event_type": "finish",
- "name": "init-local/search-NoCloud",
- "origin": "cloudinit",
- "result": "SUCCESS",
- "timestamp": 1567057578.55
- },
- {
- "description": "searching for local datasources",
- "event_type": "finish",
- "name": "init-local",
- "origin": "cloudinit",
- "result": "SUCCESS",
- "timestamp": 1567057578.979
- },
- {
- "description": "searching for network datasources",
- "event_type": "start",
- "name": "init-network",
- "origin": "cloudinit",
- "timestamp": 1567057582.814
- },
- {
- "description": "attempting to read from cache [trust]",
- "event_type": "start",
- "name": "init-network/check-cache",
- "origin": "cloudinit",
- "timestamp": 1567057582.832
- },
- ...
- {
- "description": "config-power-state-change ran successfully",
- "event_type": "finish",
- "name": "modules-final/config-power-state-change",
- "origin": "cloudinit",
- "result": "SUCCESS",
- "timestamp": 1567057594.98
- },
- {
- "description": "running modules for final",
- "event_type": "finish",
- "name": "modules-final",
- "origin": "cloudinit",
- "result": "SUCCESS",
- "timestamp": 1567057594.982
- }
- ]
-
-
-Boot
-----
-
-The ``boot`` action prints out kernel related timestamps that are not included
-in any of the cloud-init logs. There are three different timestamps that are
-presented to the user:
-
-- kernel start
-- kernel finish boot
-- cloud-init start
-
-This was added for additional clarity into the boot process that cloud-init
-does not have control over, to aid in debugging of performance issues related
-to cloud-init startup, and tracking regression.
-
-.. code-block:: shell-session
-
- $ cloud-init analyze boot
- -- Most Recent Boot Record --
- Kernel Started at: 2019-08-29 01:35:37.753790
- Kernel ended boot at: 2019-08-29 01:35:38.807407
- Kernel time to boot (seconds): 1.053617000579834
- Cloud-init activated by systemd at: 2019-08-29 01:35:43.992460
- Time between Kernel end boot and Cloud-init activation (seconds): 5.185053110122681
- Cloud-init start: 2019-08-29 08:35:45.867000
- successful
-
-Timestamp Gathering
-^^^^^^^^^^^^^^^^^^^
-
-The following boot related timestamps are gathered on demand when cloud-init
-analyze boot runs:
-
-- Kernel startup gathered from system uptime
-- Kernel finishes initialization from systemd
- UserSpaceMonotonicTimestamp property
-- Cloud-init activation from the property InactiveExitTimestamp of the
- cloud-init local systemd unit
-
-In order to gather the necessary timestamps using systemd, running the
-commands below will gather the UserspaceTimestamp and InactiveExitTimestamp:
-
-.. code-block:: shell-session
-
- $ systemctl show -p UserspaceTimestampMonotonic
- UserspaceTimestampMonotonic=989279
- $ systemctl show cloud-init-local -p InactiveExitTimestampMonotonic
- InactiveExitTimestampMonotonic=4493126
-
-The UserspaceTimestamp tracks when the init system starts, which is used as
-an indicator of kernel finishing initialization. The InactiveExitTimestamp
-tracks when a particular systemd unit transitions from the Inactive to Active
-state, which can be used to mark the beginning of systemd's activation of
-cloud-init.
-
-Currently this only works for distros that use systemd as the init process.
-We will be expanding support for other distros in the future and this document
-will be updated accordingly.
-
-If systemd is not present on the system, dmesg is used to attempt to find an
-event that logs the beginning of the init system. However, with this method
-only the first two timestamps are able to be found; dmesg does not monitor
-userspace processes, so no cloud-init start timestamps are emitted like when
-using systemd.
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
deleted file mode 100644
index ba9bd40b..00000000
--- a/doc/rtd/topics/boot.rst
+++ /dev/null
@@ -1,246 +0,0 @@
-.. _boot_stages:
-
-Boot Stages
-***********
-
-In order to be able to provide the functionality that it does, cloud-init
-must be integrated into the boot in fairly controlled way. There are five
-stages to boot:
-
-1. Generator
-2. Local
-3. Network
-4. Config
-5. Final
-
-Generator
-=========
-
-When booting under systemd, a
-`generator <https://www.freedesktop.org/software/systemd/man/systemd.generator.html>`_
-will run that determines if cloud-init.target should be included in the boot
-goals. By default, this generator will enable cloud-init. It will not enable
-cloud-init if either:
-
- * The file ``/etc/cloud/cloud-init.disabled`` exists
- * The kernel command line as found in ``/proc/cmdline`` contains
- ``cloud-init=disabled``. When running in a container, the kernel command
- line is not honored, but cloud-init will read an environment variable named
- ``KERNEL_CMDLINE`` in its place.
-
-Again, these mechanisms for disabling cloud-init at runtime currently only
-exist in systemd.
-
-Local
-=====
-
-+------------------+----------------------------------------------------------+
-| systemd service | ``cloud-init-local.service`` |
-+---------+--------+----------------------------------------------------------+
-| runs | as soon as possible with ``/`` mounted read-write |
-+---------+--------+----------------------------------------------------------+
-| blocks | as much of boot as possible, *must* block network |
-+---------+--------+----------------------------------------------------------+
-| modules | none |
-+---------+--------+----------------------------------------------------------+
-
-The purpose of the local stage is to:
-
- * locate "local" data sources.
- * apply networking configuration to the system (including "Fallback")
-
-In most cases, this stage does not do much more than that. It finds the
-datasource and determines the network configuration to be used. That
-network configuration can come from:
-
- * **datasource**: cloud provided network configuration via metadata
- * **fallback**: cloud-init's fallback networking consists of rendering the
- equivalent to "dhcp on eth0", which was historically the most popular
- mechanism for network configuration of a guest
- * **none**: network configuration can be disabled by writing the file
- ``/etc/cloud/cloud.cfg`` with the content:
- ``network: {config: disabled}``
-
-If this is an instance's first boot, then the selected network configuration
-is rendered. This includes clearing of all previous (stale) configuration
-including persistent device naming with old mac addresses.
-
-This stage must block network bring-up or any stale configuration that might
-have already been applied. Otherwise, that could have negative effects such
-as DHCP hooks or broadcast of an old hostname. It would also put the system
-in an odd state to recover from, as it may then have to restart network
-devices.
-
-Cloud-init then exits and expects the continued boot of the operating system
-to bring network configuration up as configured.
-
-**Note**: In the past, local data sources have been only those that were
-available without network (such as 'ConfigDrive'). However, as seen in
-the recent additions to the DigitalOcean datasource, even data sources
-that require a network can operate at this stage.
-
-Network
-=======
-
-+------------------+----------------------------------------------------------+
-| systemd service | ``cloud-init.service`` |
-+---------+--------+----------------------------------------------------------+
-| runs | after local stage and configured networking is up |
-+---------+--------+----------------------------------------------------------+
-| blocks | as much of remaining boot as possible |
-+---------+--------+----------------------------------------------------------+
-| modules | *cloud_init_modules* in ``/etc/cloud/cloud.cfg`` |
-+---------+--------+----------------------------------------------------------+
-
-This stage requires all configured networking to be online, as it will fully
-process any user-data that is found. Here, processing means:
-
- * retrieve any ``#include`` or ``#include-once`` content (recursively),
-   including content referenced over http
- * decompress any compressed content
- * run any part-handlers found
-
-This stage runs the ``disk_setup`` and ``mounts`` modules which may partition
-and format disks and configure mount points (such as in ``/etc/fstab``).
-Those modules cannot run earlier as they may receive configuration input
-from sources only available via network. For example, a user may have
-provided user-data in a network resource that describes how local mounts
-should be done.
-
-On some clouds, such as Azure, this stage will create filesystems to be
-mounted, including ones that have stale (previous instance) references in
-``/etc/fstab``. As such, entries in ``/etc/fstab`` other than those necessary
-for cloud-init to run should not be added until after this stage.
-
-A part-handler will run at this stage, as will boot-hooks including
-cloud-config ``bootcmd``. The user of this functionality has to be aware
-that the system is in the process of booting when their code runs.
-
-Config
-======
-
-+------------------+----------------------------------------------------------+
-| systemd service | ``cloud-config.service`` |
-+---------+--------+----------------------------------------------------------+
-| runs | after network |
-+---------+--------+----------------------------------------------------------+
-| blocks | nothing |
-+---------+--------+----------------------------------------------------------+
-| modules | *cloud_config_modules* in ``/etc/cloud/cloud.cfg`` |
-+---------+--------+----------------------------------------------------------+
-
-This stage runs config modules only. Modules that do not really have an
-effect on other stages of boot are run here, including ``runcmd``.
-
-Final
-=====
-
-+------------------+----------------------------------------------------------+
-| systemd service | ``cloud-final.service`` |
-+---------+--------+----------------------------------------------------------+
-| runs | as final part of boot (traditional "rc.local") |
-+---------+--------+----------------------------------------------------------+
-| blocks | nothing |
-+---------+--------+----------------------------------------------------------+
-| modules | *cloud_final_modules* in ``/etc/cloud/cloud.cfg`` |
-+---------+--------+----------------------------------------------------------+
-
-This stage runs as late in boot as possible. Any scripts that a user is
-accustomed to running after logging into a system should run correctly here.
-Things that run here include:
-
- * package installations
- * configuration management plugins (ansible, puppet, chef, salt-minion)
- * user-defined scripts (i.e. shell scripts passed as user-data)
-
-For scripts external to cloud-init that need to wait until cloud-init is
-finished, the ``cloud-init status --wait`` subcommand can block until
-cloud-init is done without having to write your own systemd unit dependency
-chains. See :ref:`cli_status` for more info.
-
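-As a minimal sketch of such a gate, where ``deploy-app.sh`` stands in for a
-hypothetical script of your own:
-
-.. code-block:: shell-session
-
-   # blocks until cloud-init completes; a non-zero exit indicates an error
-   $ cloud-init status --wait && ./deploy-app.sh
-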
-First Boot Determination
-========================
-
-cloud-init has to determine whether the current boot is the first boot of a
-new instance, so that it applies the appropriate configuration. On an
-instance's first boot, it should run all "per-instance" configuration,
-whereas on a subsequent boot it should run only "per-boot" configuration. This
-section describes how cloud-init performs this determination, as well as why
-it is necessary.
-
-When it runs, cloud-init stores a cache of its internal state for use across
-stages and boots.
-
-If this cache is present, then cloud-init has run on this system before.
-[#not-present]_ There are two cases where this could occur. Most commonly,
-the instance has been rebooted, and this is a second/subsequent boot.
-Alternatively, the filesystem has been attached to a *new* instance, and this
-is an instance's first boot. The most obvious case where this happens is when
-an instance is launched from an image captured from a launched instance.
-
-By default, cloud-init attempts to determine which case it is running in by
-checking the instance ID in the cache against the instance ID it determines at
-runtime. If they do not match, then this is an instance's first boot;
-otherwise, it's a subsequent boot. Internally, cloud-init refers to this
-behavior as ``check``.
-
-This behavior is required for images captured from launched instances to
-behave correctly, and so is the default which generic cloud images ship with.
-However, there are cases where it can cause problems. [#problems]_ For these
-cases, cloud-init has support for modifying its behavior to trust the instance
-ID that is present in the system unconditionally. This means that cloud-init
-will never detect a new instance when the cache is present, and it follows that
-the only way to cause cloud-init to detect a new instance (and therefore its
-first boot) is to manually remove cloud-init's cache. Internally, this
-behavior is referred to as ``trust``.
-
-To configure which of these behaviors to use, cloud-init exposes the
-``manual_cache_clean`` configuration option. When ``false`` (the default),
-cloud-init will ``check`` and clean the cache if the instance IDs do not
-match. When ``true``, cloud-init will ``trust`` the existing cache (and
-therefore not clean it).
-
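-As a sketch, the option can be set from any configuration source, for example
-via a drop-in file (the file name below is only illustrative):
-
-.. code-block:: shell-session
-
-   $ cat /etc/cloud/cloud.cfg.d/99-manual-cache-clean.cfg
-   manual_cache_clean: true
-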
-Manual Cache Cleaning
-=====================
-
-cloud-init ships a command for manually cleaning the cache: ``cloud-init
-clean``. See :ref:`cli_clean`'s documentation for further details.
-
-Reverting ``manual_cache_clean`` Setting
-========================================
-
-Currently there is no support for switching an instance that is launched with
-``manual_cache_clean: true`` from ``trust`` behavior to ``check`` behavior,
-other than manually cleaning the cache.
-
-.. warning:: If you want to capture an instance that is currently in ``trust``
- mode as an image for launching other instances, you **must** manually clean
- the cache. If you do not do so, then instances launched from the captured
- image will all detect their first boot as a subsequent boot of the captured
- instance, and will not apply any per-instance configuration.
-
- This is a functional issue, but also a potential security one: cloud-init is
- responsible for rotating SSH host keys on first boot, and this will not
- happen on these instances.
-
-.. [#not-present] It follows that if this cache is not present, cloud-init has
- not run on this system before, so this is unambiguously this instance's
- first boot.
-
-.. [#problems] A couple of ways in which this strict reliance on the presence
- of a datasource has been observed to cause problems:
-
-   * If a cloud's metadata service is flaky and cloud-init cannot obtain the
-     instance ID locally on that platform, cloud-init will sometimes fail to
-     determine the current instance ID, which makes it impossible to determine
-     if this is an instance's first or subsequent boot (`#1885527`_).
- * If cloud-init is used to provision a physical appliance or device and an
- attacker can present a datasource to the device with a different instance
- ID, then cloud-init's default behavior will detect this as an instance's
- first boot and reset the device using the attacker's configuration
- (this has been observed with the NoCloud datasource in `#1879530`_).
-
-.. _#1885527: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1885527
-.. _#1879530: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1879530
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst
deleted file mode 100644
index c66048e2..00000000
--- a/doc/rtd/topics/bugs.rst
+++ /dev/null
@@ -1,108 +0,0 @@
-.. _reporting_bugs:
-
-Reporting Bugs
-**************
-
-This document describes:
-
-1) How to collect information for reporting bugs
-2) How to file bugs to the upstream cloud-init project or for distro-specific
-   packages
-
-Collect Logs
-============
-
-To aid in debugging, please collect the necessary logs. To do so, run the
-`collect-logs` subcommand to produce a tarfile that you can easily upload:
-
-.. code-block:: shell-session
-
- $ sudo cloud-init collect-logs
- Wrote /home/ubuntu/cloud-init.tar.gz
-
-If your version of cloud-init does not have the `collect-logs` subcommand,
-then please manually collect the base log files by doing the following:
-
-.. code-block:: shell-session
-
- $ sudo dmesg > dmesg.txt
- $ sudo journalctl -o short-precise > journal.txt
- $ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \
- /var/log/cloud-init.log /var/log/cloud-init-output.log
-
-Report Upstream Bug
-===================
-
-Bugs for upstream cloud-init are tracked using Launchpad. To file a bug:
-
-1. Collect the necessary debug logs as described above
-2. `Create a Launchpad account`_ or login to your existing account
-3. `Report an upstream cloud-init bug`_
-
-If debug logs are not provided, you will be asked for them before any
-further time is spent debugging. If you are unable to obtain the required
-logs please explain why in the bug.
-
-If your bug is for a specific distro using cloud-init, please first consider
-reporting it with the upstream distro or confirm that it still occurs
-with the latest upstream cloud-init code. See below for details on specific
-distro reporting.
-
-Distro Specific Issues
-======================
-
-For issues specific to your distro please use one of the following distro
-specific reporting mechanisms:
-
-Ubuntu
-------
-
-To report a bug on Ubuntu use the `ubuntu-bug` command on the affected
-system to automatically collect the necessary logs and file a bug on
-Launchpad:
-
-.. code-block:: shell-session
-
- $ ubuntu-bug cloud-init
-
-If that does not work or is not an option, please collect the logs using the
-commands in the above Collect Logs section and then report the bug on the
-`Ubuntu bug tracker`_. Make sure to attach your collected logs!
-
-Debian
-------
-
-To file a bug against the Debian package of cloud-init please use the
-`Debian bug tracker`_ to file against 'Package: cloud-init'. See the
-`Debian bug reporting wiki`_ page for more details.
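-
-If the standard Debian ``reportbug`` tool is available on the affected system,
-it can also be used to start a report against the package:
-
-.. code-block:: shell-session
-
-   $ reportbug cloud-init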
-
-Red Hat, CentOS, & Fedora
--------------------------
-
-To file a bug against the Red Hat or Fedora packages of cloud-init please use
-the `Red Hat bugzilla`_.
-
-SUSE & openSUSE
----------------
-
-To file a bug against the SUSE packages of cloud-init please use the
-`SUSE bugzilla`_.
-
-Arch Linux
-----------
-
-To file a bug against the Arch package of cloud-init please use the
-`Arch Linux Bugtracker`_. See the `Arch Linux bug reporting wiki`_ for more
-details.
-
-.. _Create a Launchpad account: https://help.launchpad.net/YourAccount/NewAccount
-.. _Report an upstream cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
-.. _Ubuntu bug tracker: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+filebug
-.. _Debian bug tracker: https://bugs.debian.org/cgi-bin/pkgreport.cgi?pkg=cloud-init;dist=unstable
-.. _Debian bug reporting wiki: https://www.debian.org/Bugs/Reporting
-.. _Red Hat bugzilla: https://bugzilla.redhat.com/
-.. _SUSE bugzilla: https://bugzilla.suse.com/index.cgi
-.. _Arch Linux Bugtracker: https://bugs.archlinux.org/
-.. _Arch Linux bug reporting wiki: https://wiki.archlinux.org/index.php/Bug_reporting_guidelines
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
deleted file mode 100644
index bd7cac62..00000000
--- a/doc/rtd/topics/cli.rst
+++ /dev/null
@@ -1,344 +0,0 @@
-.. _cli:
-
-CLI Interface
-*************
-
-For the latest list of subcommands and arguments use cloud-init's ``--help``
-option. This can be used against cloud-init itself or any of its subcommands.
-
-.. code-block:: shell-session
-
- $ cloud-init --help
- usage: cloud-init [-h] [--version] [--file FILES] [--debug] [--force]
- {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema} ...
-
- options:
- -h, --help show this help message and exit
- --version, -v Show program's version number and exit.
- --file FILES, -f FILES
- Use additional yaml configuration files.
- --debug, -d Show additional pre-action logging (default: False).
- --force Force running even if no datasource is found (use at your own risk).
-
- Subcommands:
- {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status,schema}
- init Initialize cloud-init and perform initial modules.
- modules Activate modules using a given configuration key.
- single Run a single module.
- query Query standardized instance metadata from the command line.
- dhclient-hook Run the dhclient hook to record network info.
- features List defined features.
- analyze Devel tool: Analyze cloud-init logs and data.
- devel Run development tools.
- collect-logs Collect and tar all cloud-init debug info.
- clean Remove logs and artifacts so cloud-init can re-run.
- status Report cloud-init status or wait on completion.
- schema Validate cloud-config files using jsonschema.
-
-
-The rest of this document will give an overview of each of the subcommands.
-
-
-.. _cli_analyze:
-
-analyze
-=======
-
-Get detailed reports of where cloud-init spends its time during the boot
-process. For more complete reference see :ref:`analyze`.
-
-Possible subcommands include:
-
-* *blame*: report ordered by most costly operations
-* *dump*: machine-readable JSON dump of all cloud-init tracked events
-* *show*: show time-ordered report of the cost of operations during each
- boot stage
-* *boot*: show timestamps from kernel initialization, kernel finish
- initialization, and cloud-init start
-
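-For example, each report is produced by invoking the corresponding subcommand
-directly (output omitted here):
-
-.. code-block:: shell-session
-
-   $ cloud-init analyze blame
-   $ cloud-init analyze show
-   $ cloud-init analyze boot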
-
-.. _cli_clean:
-
-clean
-=====
-
-Remove cloud-init artifacts from ``/var/lib/cloud`` to simulate a clean
-instance. On reboot, cloud-init will re-run all stages as it did on first boot.
-
-* ``--logs``: optionally remove all cloud-init log files in ``/var/log/``
-* ``--reboot``: reboot the system after removing artifacts
-* ``--machine-id``: remove ``/etc/machine-id`` on this image. This is best
-  practice when cloning a golden image, to ensure that the next boot of that
-  image auto-generates a unique machine ID. `More details on machine-id`_.
-
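-For example, to clear state and logs so that the next boot behaves like a
-first boot, rebooting immediately afterwards:
-
-.. code-block:: shell-session
-
-   $ sudo cloud-init clean --logs --reboot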
-
-.. _cli_collect_logs:
-
-collect-logs
-============
-
-Collect and tar cloud-init generated logs, data files, and system
-information for triage. This subcommand is integrated with apport.
-
-Logs collected include:
-
- * ``/var/log/cloud-init.log``
- * ``/var/log/cloud-init-output.log``
- * ``/run/cloud-init``
- * ``/var/lib/cloud/instance/user-data.txt``
- * cloud-init package version
- * ``dmesg`` output
- * journalctl output
-
-.. note::
-
- Ubuntu users can file bugs with ``ubuntu-bug cloud-init`` to
- automatically attach these logs to a bug report
-
-
-.. _cli_devel:
-
-devel
-=====
-
-Collection of development tools under active development. These tools will
-likely be promoted to top-level subcommands when stable.
-
-Do **NOT** rely on the output of these commands as they can and will change.
-
-Current subcommands:
-
- * ``net-convert``: manually use cloud-init's network format conversion, useful
- for testing configuration or testing changes to the network conversion logic
- itself.
- * ``render``: use cloud-init's jinja template render to
- process **#cloud-config** or **custom-scripts**, injecting any variables
- from ``/run/cloud-init/instance-data.json``. It accepts a user-data file
- containing the jinja template header ``## template: jinja`` and renders
- that content with any instance-data.json variables present.
- * ``hotplug-hook``: respond to newly added system devices by retrieving
- updated system metadata and bringing up/down the corresponding device.
- This command is intended to be called via a systemd service and is
- not considered user-accessible except for debugging purposes.
-
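-As a rough sketch of the input that ``render`` expects, a jinja-enabled
-user-data file starts with the template header and may reference any variable
-present in ``/run/cloud-init/instance-data.json`` (the file name below is
-purely illustrative):
-
-.. code-block:: shell-session
-
-   $ cat my-user-data.yaml
-   ## template: jinja
-   #cloud-config
-   runcmd:
-     - echo "booted on {{ v1.cloud_name }}" > /var/tmp/cloud_name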
-
-.. _cli_features:
-
-features
-========
-
-Print out each feature supported. If cloud-init does not have the
-features subcommand, it also does not support any features described in
-this document.
-
-.. code-block:: shell-session
-
- $ cloud-init features
- NETWORK_CONFIG_V1
- NETWORK_CONFIG_V2
-
-
-.. _cli_init:
-
-init
-====
-
-Generally run by OS init systems to execute cloud-init's stages
-*init* and *init-local*. See :ref:`boot_stages` for more info.
-It can be run on the command line, but is generally gated to run only once
-due to semaphores in ``/var/lib/cloud/instance/sem/`` and
-``/var/lib/cloud/sem``.
-
-* ``--local``: run *init-local* stage instead of *init*
-
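-For example, to run the local stage by hand (normally the init system does
-this, and the semaphores above may prevent a re-run):
-
-.. code-block:: shell-session
-
-   $ sudo cloud-init init --local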
-
-.. _cli_modules:
-
-modules
-=======
-
-Generally run by OS init systems to execute *modules:config* and
-*modules:final* boot stages. This executes cloud config :ref:`modules`
-configured to run in the init, config and final stages. The modules are
-declared to run in various boot stages in the file
-``/etc/cloud/cloud.cfg`` under keys:
-
-* *cloud_init_modules*
-* *cloud_config_modules*
-* *cloud_final_modules*
-
-Can be run on the command line, but each module is gated to run only once due
-to semaphores in ``/var/lib/cloud/``.
-
-* ``--mode [init|config|final]``: run ``modules:init``, ``modules:config`` or
-  ``modules:final`` cloud-init stages. See :ref:`boot_stages` for more info.
-
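-For example, to run the config-stage modules manually:
-
-.. code-block:: shell-session
-
-   $ sudo cloud-init modules --mode config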
-
-.. _cli_query:
-
-query
-=====
-
-Query standardized cloud instance metadata crawled by cloud-init and stored
-in ``/run/cloud-init/instance-data.json``. This is a convenience command-line
-interface to reference any cached configuration metadata that cloud-init
-crawls when booting the instance. See :ref:`instance_metadata` for more info.
-
-* ``--all``: dump all available instance data as json which can be queried
-* ``--instance-data``: optional path to a different instance-data.json file
- to source for queries
-* ``--list-keys``: list available query keys from cached instance data
-* ``--format``: a jinja-template string to render, in which each
-  ``<varname>`` reference is replaced; ``<varname>`` is a dot-delimited
-  variable path into the instance-data.json object
-
-Below demonstrates how to list all top-level query keys that are standardized
-aliases:
-
-.. code-block:: shell-session
-
- $ cloud-init query --list-keys
- _beta_keys
- availability_zone
- base64_encoded_keys
- cloud_name
- ds
- instance_id
- local_hostname
- platform
- public_ssh_keys
- region
- sensitive_keys
- subplatform
- userdata
- v1
- vendordata
-
-Below demonstrates how to query standardized metadata from clouds:
-
-.. code-block:: shell-session
-
- % cloud-init query v1.cloud_name
- aws # or openstack, azure, gce etc.
-
- # Any standardized instance-data under a <v#> key is aliased as a top-level key for convenience.
- % cloud-init query cloud_name
- aws # or openstack, azure, gce etc.
-
- # Query datasource-specific metadata on EC2
- % cloud-init query ds.meta_data.public_ipv4
-
-.. note::
-
-   The standardized instance data keys under **v#** are guaranteed not to
-   change behavior or format. If using top-level convenience aliases for any
-   standardized instance data key, the value from the most recent version
-   (highest **v#**) of that key is what is reported as the top-level value,
-   so these aliases act as a 'latest'.
-
-This data can then be formatted to generate custom strings or data:
-
-.. code-block:: shell-session
-
- # Generate a custom hostname fqdn based on instance-id, cloud and region
- % cloud-init query --format 'custom-{{instance_id}}.{{region}}.{{v1.cloud_name}}.com'
- custom-i-0e91f69987f37ec74.us-east-2.aws.com
-
-
-.. _cli_schema:
-
-schema
-======
-
-Validate cloud-config files using jsonschema.
-
-* ``-h, --help``: show this help message and exit
-* ``-c CONFIG_FILE, --config-file CONFIG_FILE``: Path of the cloud-config yaml
- file to validate
-* ``--system``: Validate the system cloud-config userdata
-* ``-d DOCS [DOCS ...], --docs DOCS [DOCS ...]``: Print schema module docs.
- Choices: all or space-delimited cc_names.
-* ``--annotate``: Annotate existing cloud-config file with errors
-
-The following example checks a config file and annotates the config file with
-errors on stdout.
-
-.. code-block:: shell-session
-
- $ cloud-init schema -c ./config.yml --annotate
-
-
-.. _cli_single:
-
-single
-======
-
-Attempt to run a single named cloud config module.
-
-* ``--name``: the cloud-config module name to run
-* ``--frequency``: module frequency for this run.
- One of (always|once-per-instance|once)
-* ``--report``: enable reporting
-
-The following example re-runs the cc_set_hostname module ignoring the module
-default frequency of once-per-instance:
-
-.. code-block:: shell-session
-
- $ cloud-init single --name set_hostname --frequency always
-
-.. note::
-
- Mileage may vary trying to re-run each cloud-config module, as
- some are not idempotent.
-
-
-.. _cli_status:
-
-status
-======
-
-Report whether cloud-init is running, done, disabled or errored. Exits
-non-zero if an error is detected in cloud-init.
-
-* ``--long``: detailed status information
-* ``--wait``: block until cloud-init completes
-* ``--format [yaml|json|tabular]``: machine-readable JSON or YAML detailed
- output
-
-Below are examples of output when cloud-init is running, showing status and
-the currently running modules, as well as when it is done.
-
-.. code-block:: shell-session
-
- $ cloud-init status
- status: running
-
- $ cloud-init status --long
- status: running
- time: Fri, 26 Jan 2018 21:39:43 +0000
- detail:
- Running in stage: init-local
-
- $ cloud-init status
- status: done
-
- $ cloud-init status --long
- status: done
- boot_status_code: enabled-by-generator
- last_update: Tue, 16 Aug 2022 19:12:58 +0000
- detail:
- DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]
-
- $ cloud-init status --format=json
- {
- "boot_status_code": "enabled-by-generator",
- "datasource": "nocloud",
- "detail": "DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]",
- "errors": [],
- "last_update": "Tue, 16 Aug 2022 19:12:58 +0000",
- "status": "done"
- }
-
-.. _More details on machine-id: https://www.freedesktop.org/software/systemd/man/machine-id.html
diff --git a/doc/rtd/topics/configuration.rst b/doc/rtd/topics/configuration.rst
deleted file mode 100644
index 14716f29..00000000
--- a/doc/rtd/topics/configuration.rst
+++ /dev/null
@@ -1,79 +0,0 @@
-.. _configuration:
-
-Configuration Sources
-*********************
-
-Internally, cloud-init builds a single configuration that is then referenced
-throughout the life of cloud-init. The configuration is built from multiple
-sources such that if a key is defined in multiple sources, the higher priority
-source overwrites the lower priority source.
-
-Base Configuration
-==================
-
-From lowest priority to highest, configuration sources are:
-
-* **Hardcoded config**: Config_ that lives within the source of cloud-init
- and cannot be changed.
-* **Configuration directory**: Anything defined in ``/etc/cloud/cloud.cfg`` and
- ``/etc/cloud/cloud.cfg.d``.
-* **Runtime config**: Anything defined in ``/run/cloud-init/cloud.cfg``.
-* **Kernel command line**: On the kernel command line, anything found between
- ``cc:`` and ``end_cc`` will be interpreted as cloud-config user data.
-
-These four sources make up the base configuration.
-
-Vendor and User Data
-====================
-Added to the base configuration are:
-
-* **Vendor data**: :ref:`Data<vendordata>` provided by the datasource
-* **User data**: :ref:`Data<user_data_formats>` also provided by
- the datasource
-
-These get fetched from the datasource and are defined at instance launch.
-
-.. note::
- While much of what is defined in the base configuration can be overridden by
- vendor data and user data, base configuration sources do not conform to
- :ref:`#cloud-config<topics/format:Cloud Config Data>`
-
-Network Configuration
-=====================
-Network configuration happens independently from other cloud-init
-configuration. See :ref:`network configuration documentation<default_behavior>`
-for more information.
-
-Specifying Configuration
-==========================
-
-End users
----------
-Pass :ref:`user data<user_data_formats>` to the cloud provider.
-Every platform supporting cloud-init will provide a method of supplying
-user data. If you're unsure how to do this, reference the documentation
-provided by the cloud platform you're on. Additionally, there may be
-related cloud-init documentation in the :ref:`datasource<datasources>`
-section.
-
-Once an instance has been initialized, the user data may not be edited.
-It is sourced directly from the cloud, so even if you find a local file
-that contains user data, it will likely be overwritten next boot.
-
-Distro Providers
-----------------
-Modify the base config. This often involves submitting a PR to modify
-the base `cloud.cfg template`_, which is used to customize
-``/etc/cloud/cloud.cfg`` per distro. Additionally, a file can be added to
-``/etc/cloud/cloud.cfg.d`` to override a piece of the base configuration.
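-
-For instance, a drop-in file can override a single base-configuration key such
-as ``datasource_list`` (the file name here is only illustrative):
-
-.. code-block:: shell-session
-
-   $ cat /etc/cloud/cloud.cfg.d/90-datasource.cfg
-   datasource_list: [ NoCloud, None ]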
-
-Cloud Providers
----------------
-Pass vendor data. This is the preferred method for clouds to provide
-their own customization. In some cases, it may make sense to modify the
-base config in the same manner as distro providers on cloud-supported
-images.
-
-
-.. _Config: https://github.com/canonical/cloud-init/blob/b861ea8a5e1fd0eb33096f60f54eeff42d80d3bd/cloudinit/settings.py#L22
-.. _cloud.cfg template: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
deleted file mode 100644
index 0867564d..00000000
--- a/doc/rtd/topics/datasources.rst
+++ /dev/null
@@ -1,114 +0,0 @@
-.. _datasources:
-
-Datasources
-***********
-
-Datasources are sources of configuration data for cloud-init that typically
-come from the user (i.e. userdata) or come from the cloud that created the
-configuration drive (i.e. metadata). Typical userdata would include files,
-YAML, and shell scripts while typical metadata would include server name,
-instance id, display name and other cloud specific details.
-
-Since there are multiple ways to provide this data (each cloud solution seems
-to prefer its own way), a datasource abstract class was created internally to
-provide a single way of accessing the data that the different cloud systems
-supply, with each cloud implemented as a subclass of that abstraction.
-
-Any metadata processed by cloud-init's datasources is persisted as
-``/run/cloud-init/instance-data.json``. Cloud-init provides tooling to quickly
-introspect some of that data. See :ref:`instance_metadata` for more
-information.
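-
-For example, a quick way to introspect the crawled data on a running instance:
-
-.. code-block:: shell-session
-
-   $ cloud-init query --list-keys
-   $ cloud-init query v1.cloud_name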
-
-Known Sources
-=============
-
-The following is a list of documents for each supported datasource:
-
-.. toctree::
- :titlesonly:
-
- datasources/aliyun.rst
- datasources/altcloud.rst
- datasources/ec2.rst
- datasources/azure.rst
- datasources/cloudsigma.rst
- datasources/cloudstack.rst
- datasources/configdrive.rst
- datasources/digitalocean.rst
- datasources/e24cloud.rst
- datasources/exoscale.rst
- datasources/fallback.rst
- datasources/gce.rst
- datasources/lxd.rst
- datasources/maas.rst
- datasources/nocloud.rst
- datasources/opennebula.rst
- datasources/openstack.rst
- datasources/oracle.rst
- datasources/ovf.rst
- datasources/rbxcloud.rst
- datasources/smartos.rst
- datasources/upcloud.rst
- datasources/vmware.rst
- datasources/vultr.rst
- datasources/zstack.rst
- datasources/nwcs.rst
-
-Creation
-========
-
-The datasource objects have a few touch points with cloud-init. If you
-are interested in adding a new datasource for your cloud platform you will
-need to take care of the following items:
-
-* **Identify a mechanism for positive identification of the platform**:
- It is good practice for a cloud platform to positively identify itself
- to the guest. This allows the guest to make educated decisions based
- on the platform on which it is running. On the x86 and arm64 architectures,
- many clouds identify themselves through DMI data. For example,
- Oracle's public cloud provides the string 'OracleCloud.com' in the
- DMI chassis-asset field.
-
- cloud-init enabled images produce a log file with details about the
- platform. Reading through this log in ``/run/cloud-init/ds-identify.log``
- may provide the information needed to uniquely identify the platform.
- If the log is not present, you can generate it by running from source
- ``./tools/ds-identify`` or the installed location
- ``/usr/lib/cloud-init/ds-identify``.
-
- The mechanism used to identify the platform will be required for the
- ds-identify and datasource module sections below.
-
-* **Add datasource module ``cloudinit/sources/DataSource<CloudPlatform>.py``**:
- It is suggested that you start by copying one of the simpler datasources
- such as DataSourceHetzner.
-
-* **Add tests for datasource module**:
- Add a new file with some tests for the module to
- ``cloudinit/sources/test_<yourplatform>.py``. For example see
- ``cloudinit/sources/tests/test_oracle.py``
-
-* **Update ds-identify**: In systemd systems, ds-identify is used to detect
- which datasource should be enabled or if cloud-init should run at all.
- You'll need to make changes to ``tools/ds-identify``.
-
-* **Add tests for ds-identify**: Add relevant tests in a new class to
- ``tests/unittests/test_ds_identify.py``. You can use ``TestOracle`` as an
- example.
-
-* **Add your datasource name to the builtin list of datasources:** Add
- your datasource module name to the end of the ``datasource_list``
- entry in ``cloudinit/settings.py``.
-
-* **Add your cloud platform to apport collection prompts:** Update the
- list of cloud platforms in ``cloudinit/apport.py``. This list will be
- provided to the user who invokes ``ubuntu-bug cloud-init``.
-
-* **Enable datasource by default in ubuntu packaging branches:**
- Ubuntu packaging branches contain a template file
- ``debian/cloud-init.templates`` that ultimately sets the default
- datasource_list when installed via package. This file needs updating when
- the commit gets into a package.
-
-* **Add documentation for your datasource**: You should add a new
- file in ``doc/datasources/<cloudplatform>.rst``
diff --git a/doc/rtd/topics/datasources/aliyun.rst b/doc/rtd/topics/datasources/aliyun.rst
deleted file mode 100644
index 0bb9c19e..00000000
--- a/doc/rtd/topics/datasources/aliyun.rst
+++ /dev/null
@@ -1,89 +0,0 @@
-.. _datasource_aliyun:
-
-Alibaba Cloud (AliYun)
-======================
-The ``AliYun`` datasource reads data from Alibaba Cloud ECS. Support has been
-present in cloud-init since 0.7.9.
-
-Metadata Service
-----------------
-The Alibaba Cloud metadata service is available at the well-known URL
-``http://100.100.100.200/``. For more information, see the Alibaba Cloud ECS
-documentation on `metadata
-<https://www.alibabacloud.com/help/zh/faq-detail/49122.htm>`__.
-
-Configuration
--------------
-The following configuration can be set for the datasource in system
-configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
-
-An example configuration with the default values is provided below:
-
-.. sourcecode:: yaml
-
- datasource:
- AliYun:
- metadata_urls: ["http://100.100.100.200"]
- timeout: 50
- max_wait: 120
-
-Versions
-^^^^^^^^
-Like the EC2 metadata service, Alibaba Cloud's metadata service provides
-versioned data under specific paths. As of April 2018, there are only
-``2016-01-01`` and ``latest`` versions.
-
-It is expected that the dated version will maintain a stable interface but
-``latest`` may change content at a future date.
-
-Cloud-init uses the ``2016-01-01`` version.
-
-You can list the versions available to your instance with:
-
-.. code-block:: shell-session
-
- $ curl http://100.100.100.200/
- 2016-01-01
- latest
-
-Metadata
-^^^^^^^^
-Instance metadata can be queried at
-``http://100.100.100.200/2016-01-01/meta-data``
-
-.. code-block:: shell-session
-
- $ curl http://100.100.100.200/2016-01-01/meta-data
- dns-conf/
- eipv4
- hostname
- image-id
- instance-id
- instance/
- mac
- network-type
- network/
- ntp-conf/
- owner-account-id
- private-ipv4
- public-keys/
- region-id
- serial-number
- source-address
- sub-private-ipv4-list
- vpc-cidr-block
- vpc-id
-
-Userdata
-^^^^^^^^
-If provided, user-data will appear at
-``http://100.100.100.200/2016-01-01/user-data``.
-If no user-data is provided, this will return a 404.
-
-.. code-block:: shell-session
-
- $ curl http://100.100.100.200/2016-01-01/user-data
- #!/bin/sh
- echo "Hello World."
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst
deleted file mode 100644
index acd5e2a3..00000000
--- a/doc/rtd/topics/datasources/altcloud.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-.. _datasource_alt_cloud:
-
-Alt Cloud
-=========
-
-The datasource altcloud will be used to pick up user data on `RHEVm`_ and
-`vSphere`_.
-
-RHEVm
------
-
-For `RHEVm`_ v3.0 the userdata is injected into the VM using floppy
-injection via the `RHEVm`_ dashboard "Custom Properties".
-
-The format of the Custom Properties entry must be:
-
-::
-
- floppyinject=user-data.txt:<base64 encoded data>
-
-For example to pass a simple bash script:
-
-.. sourcecode:: sh
-
- % cat simple_script.bash
- #!/bin/bash
- echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt
-
- % base64 < simple_script.bash
- IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
-
-To pass this example script to cloud-init running in a `RHEVm`_ v3.0 VM,
-set the "Custom Properties" when creating the RHEVm v3.0 VM to:
-
-::
-
- floppyinject=user-data.txt:IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK
-
-**NOTE:** The prefix with file name must be: ``floppyinject=user-data.txt:``
-
-It is also possible to launch a `RHEVm`_ v3.0 VM and pass optional user
-data to it using the Delta Cloud.
-
-For more information on Delta Cloud see: http://deltacloud.apache.org
-
-vSphere
--------
-
-For VMWare's `vSphere`_ the userdata is injected into the VM as an ISO
-via the cdrom. This can be done using the `vSphere`_ dashboard
-by connecting an ISO image to the CD/DVD drive.
-
-To pass this example script to cloud-init running in a `vSphere`_ VM
-set the CD/DVD drive when creating the vSphere VM to point to an
-ISO on the data store.
-
-**Note:** The ISO must contain the user data.
-
-For example, to pass the same ``simple_script.bash`` to vSphere:
-
-Create the ISO
-^^^^^^^^^^^^^^
-
-.. sourcecode:: sh
-
- % mkdir my-iso
-
-NOTE: The file name on the ISO must be: ``user-data.txt``
-
-.. sourcecode:: sh
-
- % cp simple_script.bash my-iso/user-data.txt
- % genisoimage -o user-data.iso -r my-iso
-
-Verify the ISO
-^^^^^^^^^^^^^^
-
-.. sourcecode:: sh
-
- % sudo mkdir /media/vsphere_iso
- % sudo mount -o loop user-data.iso /media/vsphere_iso
- % cat /media/vsphere_iso/user-data.txt
- % sudo umount /media/vsphere_iso
-
-Then, launch the `vSphere`_ VM with the ISO user-data.iso attached as a CDROM.
-
-It is also possible to launch a `vSphere`_ VM and pass optional user
-data to it using the Delta Cloud.
-
-For more information on Delta Cloud see: http://deltacloud.apache.org
-
-.. _RHEVm: https://www.redhat.com/virtualization/rhev/desktop/rhevm/
-.. _vSphere: https://www.vmware.com/products/datacenter-virtualization/vsphere/overview.html
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
deleted file mode 100644
index b73d7d38..00000000
--- a/doc/rtd/topics/datasources/azure.rst
+++ /dev/null
@@ -1,125 +0,0 @@
-.. _datasource_azure:
-
-Azure
-=====
-
-This datasource finds metadata and user-data from the Azure cloud platform.
-
-
-The Azure cloud platform provides initial data to an instance via an attached
-CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
-information. Additional information is obtained via interaction with the
-"endpoint".
-
-
-IMDS
-----
-Azure provides the `instance metadata service (IMDS)
-<https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service>`_
-which is a REST service on ``169.254.169.254`` providing additional
-configuration information to the instance. Cloud-init uses the IMDS for:
-
-- network configuration for the instance which is applied per boot
-- a pre-provisioning gate which blocks instance configuration until the Azure
-  fabric is ready to provision
-- retrieving SSH public keys. Cloud-init will first try to use SSH keys
-  returned from IMDS, and if they are not provided by IMDS it will fall back
-  to using the OVF file provided from the CD-ROM. There is a large performance
-  benefit to using IMDS for SSH key retrieval, but in order to support
-  environments where IMDS is not available we must continue to allow for keys
-  from OVF
-
-
-Configuration
--------------
-The following configuration can be set for the datasource in system
-configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
-
-The settings that may be configured are:
-
- * **apply_network_config**: Boolean set to True to use network configuration
- described by Azure's IMDS endpoint instead of fallback network config of
- dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
- False.
- * **data_dir**: Path used to read metadata files and write crawled data.
- * **disk_aliases**: A dictionary defining which device paths should be
- interpreted as ephemeral images. See cc_disk_setup module for more info.
-
-Configuration for the datasource can also be read from a
-``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. Content in
-dscfg node is expected to be base64 encoded yaml content, and it will be
-merged into the 'datasource: Azure' entry.
-
-An example configuration with the default values is provided below:
-
-.. sourcecode:: yaml
-
- datasource:
- Azure:
- apply_network_config: true
- data_dir: /var/lib/waagent
- disk_aliases:
- ephemeral0: /dev/disk/cloud/azure_resource
-
-
-Userdata
---------
-Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init
-expects that user-data will be provided as a base64 encoded value inside the
-text child of an element named ``UserData`` or ``CustomData``, which is a
-direct child of the ``LinuxProvisioningConfigurationSet`` (a sibling to
-``UserName``). If both ``UserData`` and ``CustomData`` are provided, the
-behavior is undefined as to which will be selected.
-
-In the example below, the user-data provided is 'this is my userdata'.
-
-Example:
-
-.. sourcecode:: xml
-
- <wa:ProvisioningSection>
- <wa:Version>1.0</wa:Version>
- <LinuxProvisioningConfigurationSet
- xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
- <HostName>myHost</HostName>
- <UserName>myuser</UserName>
- <UserPassword/>
- <CustomData>dGhpcyBpcyBteSB1c2VyZGF0YQ===</CustomData>
- <dscfg>eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0=</dscfg>
- <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication>
- <SSH>
- <PublicKeys>
- <PublicKey>
- <Fingerprint>6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7</Fingerprint>
- <Path>this-value-unused</Path>
- </PublicKey>
- </PublicKeys>
- </SSH>
- </LinuxProvisioningConfigurationSet>
- </wa:ProvisioningSection>
-
-hostname
---------
-When the user launches an instance, they provide a hostname for that instance.
-The hostname is provided to the instance in the ovf-env.xml file as
-``HostName``.
-
-Whatever hostname value the instance provides in its dhcp request will
-resolve within the domain returned in the dhcp 'search' option.
-
-A generic image will already have a hostname configured. The ubuntu
-cloud images have 'ubuntu' as the hostname of the system, and the
-initial dhcp request on eth0 is not guaranteed to occur after the
-datasource code has been run. So, on first boot, that initial value
-will be sent in the dhcp request and *that* value will resolve.
-
-In order to make the ``HostName`` provided in the ovf-env.xml resolve,
-a dhcp request must be made with the new value. cloud-init handles
-this by setting the hostname in the DataSource's 'get_data' method via
-'``hostname $HostName``', and then bouncing the interface. This
-behavior can be configured or disabled in the datasource config. See
-'Configuration' above.
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/cloudsigma.rst b/doc/rtd/topics/datasources/cloudsigma.rst
deleted file mode 100644
index dee665a4..00000000
--- a/doc/rtd/topics/datasources/cloudsigma.rst
+++ /dev/null
@@ -1,42 +0,0 @@
-.. _datasource_cloudsigma:
-
-CloudSigma
-==========
-
-This datasource finds metadata and user-data from the `CloudSigma`_ cloud
-platform. Data transfer occurs through a virtual serial port of the
-`CloudSigma`_'s VM and the presence of network adapter is **NOT** a
-requirement, See `server context`_ in the public documentation for more
-information.
-
-
-Setting a hostname
-------------------
-By default the name of the server will be applied as a hostname on the first
-boot.
-
-
-Providing user-data
--------------------
-
-You can provide user-data to the VM using the dedicated `meta field`_ in the
-`server context`_ ``cloudinit-user-data``. By default the *cloud-config*
-format is expected there and the ``#cloud-config`` header may be omitted.
-However, since this is a raw-text field, you could provide any of the valid
-`config formats`_.
-
-You have the option to encode your user-data using Base64. In order to do
-that you have to add ``cloudinit-user-data`` to the ``base64_fields`` meta
-field. The latter is a comma-separated field listing all the meta fields with
-base64 encoded values.
-
-If your user-data does not need an internet connection you can create a
-`meta field`_ named ``cloudinit-dsmode`` in the `server context`_ and set
-"local" as its value. If this field does not exist, the default value is
-"net".
-
-
-.. _CloudSigma: http://cloudsigma.com/
-.. _server context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
-.. _meta field: http://cloudsigma-docs.readthedocs.org/en/latest/meta.html
-.. _config formats: http://cloudinit.readthedocs.org/en/latest/topics/format.html
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
deleted file mode 100644
index e889ab6e..00000000
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-.. _datasource_cloudstack:
-
-CloudStack
-==========
-
-`Apache CloudStack`_ exposes user-data, meta-data, user password and account
-SSH key through the Virtual Router. The datasource obtains the VR address via
-dhcp lease information given to the instance.
-For more details on meta-data and user-data,
-refer to the `CloudStack Administrator Guide`_.
-
-The following URLs provide access to user-data and meta-data from the Virtual
-Machine. `data-server.` is a well-known hostname provided by the CloudStack
-virtual router that points to the next UserData server (which is usually also
-the virtual router).
-
-.. code:: bash
-
- http://data-server./latest/user-data
- http://data-server./latest/meta-data
- http://data-server./latest/meta-data/{metadata type}
-
-If `data-server.` cannot be resolved, cloud-init will try to obtain the
-virtual router's address from the system's DHCP leases. If that fails,
-it will use the system's default gateway.
-
-Configuration
--------------
-The following configuration can be set for the datasource in system
-configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
-
-The settings that may be configured are:
-
- * **max_wait**: the maximum amount of clock time in seconds that should be
- spent searching metadata_urls. A value less than zero will result in only
- one request being made, to the first in the list. (default: 120)
- * **timeout**: the timeout value provided to urlopen for each individual http
- request. This is used both when selecting a metadata_url and when crawling
- the metadata service. (default: 50)
-
-An example configuration with the default values is provided below:
-
-.. sourcecode:: yaml
-
- datasource:
- CloudStack:
- max_wait: 120
- timeout: 50
-
-
-.. _Apache CloudStack: http://cloudstack.apache.org/
-.. _CloudStack Administrator Guide: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/virtual_machines.html#user-data-and-meta-data
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/configdrive.rst b/doc/rtd/topics/datasources/configdrive.rst
deleted file mode 100644
index 777597c2..00000000
--- a/doc/rtd/topics/datasources/configdrive.rst
+++ /dev/null
@@ -1,133 +0,0 @@
-.. _datasource_config_drive:
-
-Config Drive
-============
-
-The configuration drive datasource supports the `OpenStack`_ configuration
-drive disk.
-
- See `the config drive extension`_ and `metadata introduction`_ in the public
- documentation for more information.
-
-By default, cloud-init does *not* always consider this source to be a
-full-fledged datasource. Instead, the typical behavior is to assume it is
-really only present to provide networking information. Cloud-init will copy
-off the network information, apply it to the system, and then continue on. The
-"full" datasource could then be found in the EC2 metadata service. If this is
-not the case, then the files contained on the located drive must provide
-equivalents to what the EC2 metadata service would provide (which is typical
-of the version 2 support listed below).
-
-.. dropdown:: Version 1 (Deprecated)
-
- **Note:** Version 1 is legacy and should be considered deprecated.
- Version 2 has been supported in OpenStack since 2012.2 (Folsom).
-
- The following criteria are required for a drive to be used as a config
- drive:
-
- 1. Must be formatted with `vfat`_ filesystem
- 2. Must contain *one* of the following files
-
- ::
-
- /etc/network/interfaces
- /root/.ssh/authorized_keys
- /meta.js
-
- ``/etc/network/interfaces``
-
- This file is laid down by nova in order to pass static networking
- information to the guest. Cloud-init will copy it off of the
- config-drive and into /etc/network/interfaces (or convert it to RH
- format) as soon as it can, and then attempt to bring up all network
- interfaces.
-
- ``/root/.ssh/authorized_keys``
-
- This file is laid down by nova, and contains the ssh keys that were
- provided to nova on instance creation (nova boot --key ...).
-
- ``/meta.js``
-
- meta.js is populated on the config-drive in response to the user
- passing "meta flags" (nova boot --meta key=value ...). It is
- expected to be json formatted.
-
-
-Version 2
----------
-
-The following criteria are required for a drive to be used as a config drive:
-
-1. Must be formatted with `vfat`_ or `iso9660`_ filesystem
- or have a *filesystem* label of **config-2** or **CONFIG-2**
-2. The files that will typically be present in the config drive are:
-
-::
-
- openstack/
- - 2012-08-10/ or latest/
- - meta_data.json
- - user_data (not mandatory)
- - content/
- - 0000 (referenced content files)
- - 0001
- - ....
- ec2
- - latest/
- - meta-data.json (not mandatory)
-
-Keys and values
----------------
-
-Cloud-init's behavior can be modified by keys found in the meta.js (version 1
-only) file in the following ways.
-
-::
-
- dsmode:
- values: local, net, pass
- default: pass
-
-
-This is what indicates if configdrive is a final data source or not.
-By default it is 'pass', meaning this datasource should not be read.
-Set it to 'local' or 'net' to stop cloud-init from continuing on to
-search for other data sources after network config.
-
-The difference between 'local' and 'net' is that local will not require
-networking to be up before user-data actions (or boothooks) are run.
-
-::
-
- instance-id:
- default: iid-dsconfigdrive
-
-This is utilized as the metadata's instance-id. It should generally
-be unique, as it is what is used to determine "is this a new instance".
-
-::
-
- public-keys:
- default: None
-
-If present, these keys will be used as the public keys for the
-instance. This value overrides the content in authorized_keys.
-
-Note: it is likely preferable to provide keys via user-data
-
-::
-
- user-data:
- default: None
-
-This provides cloud-init user-data. See :ref:`examples <yaml_examples>` for
-what all can be present here.
-
-.. _OpenStack: http://www.openstack.org/
-.. _metadata introduction: https://docs.openstack.org/nova/latest/user/metadata.html#config-drives
-.. _python-novaclient: https://github.com/openstack/python-novaclient
-.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
-.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
-.. _the config drive extension: https://docs.openstack.org/nova/latest/admin/config-drive.html
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst
deleted file mode 100644
index 801841c1..00000000
--- a/doc/rtd/topics/datasources/digitalocean.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-.. _datasource_digital_ocean:
-
-DigitalOcean
-============
-
-The `DigitalOcean`_ datasource consumes the content served from DigitalOcean's
-`metadata service`_. This metadata service serves information about the
-running droplet via HTTP over the link local address 169.254.169.254. The
-metadata API endpoints are fully described at
-`https://developers.digitalocean.com/metadata/
-<https://developers.digitalocean.com/metadata/>`_.
-
-Configuration
--------------
-
-DigitalOcean's datasource can be configured as follows:
-
-.. sourcecode:: yaml
-
-   datasource:
-     DigitalOcean:
-       retries: 3
-       timeout: 2
-
-- *retries*: Determines the number of times to attempt to connect to the
- metadata service
-- *timeout*: Determines the timeout in seconds to wait for a response from the
- metadata service
-
-.. _DigitalOcean: http://digitalocean.com/
-.. _metadata service: https://developers.digitalocean.com/metadata/
-.. _Full documentation: https://developers.digitalocean.com/metadata/
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/e24cloud.rst b/doc/rtd/topics/datasources/e24cloud.rst
deleted file mode 100644
index 2af6634b..00000000
--- a/doc/rtd/topics/datasources/e24cloud.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. _datasource_e24cloud:
-
-E24Cloud
-========
-The `E24Cloud <https://www.e24cloud.com/en/>`_ platform provides an AWS EC2
-metadata service clone. It identifies itself to guests using the DMI
-system-manufacturer field (/sys/class/dmi/id/sys_vendor).
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
deleted file mode 100644
index d30e1bb6..00000000
--- a/doc/rtd/topics/datasources/ec2.rst
+++ /dev/null
@@ -1,141 +0,0 @@
-.. _datasource_ec2:
-
-Amazon EC2
-==========
-
-The EC2 datasource is the oldest and most widely used datasource that
-cloud-init supports. This datasource interacts with a *magic* ip that is
-provided to the instance by the cloud provider. Typically this ip is
-``169.254.169.254``; an http server at this ip serves the instance's userdata
-and metadata to the instance.
-
-Metadata is accessible via the following URL:
-
-::
-
- GET http://169.254.169.254/2009-04-04/meta-data/
- ami-id
- ami-launch-index
- ami-manifest-path
- block-device-mapping/
- hostname
- instance-id
- instance-type
- local-hostname
- local-ipv4
- placement/
- public-hostname
- public-ipv4
- public-keys/
- reservation-id
- security-groups
-
-Userdata is accessible via the following URL:
-
-::
-
- GET http://169.254.169.254/2009-04-04/user-data
- 1234,fred,reboot,true | 4512,jimbo, | 173,,,
-
-Note that there are multiple EC2 Metadata versions of this data provided
-to instances. cloud-init will attempt to use the most recent API version it
-supports in order to get latest API features and instance-data. If a given
-API version is not exposed to the instance, those API features will be
-unavailable to the instance.
-
-
-+----------------+----------------------------------------------------------+
-| EC2 version    | supported instance-data/feature                          |
-+================+==========================================================+
-| **2021-03-23** | Required for Instance tag support. This feature must be |
-|                | enabled individually on each instance. See the          |
-|                | `EC2 tags user guide`_.                                  |
-+----------------+----------------------------------------------------------+
-| **2016-09-02** | Required for secondary IP address support.              |
-+----------------+----------------------------------------------------------+
-| **2009-04-04** | Minimum supported EC2 API version for meta-data and     |
-|                | user-data.                                               |
-+----------------+----------------------------------------------------------+
-
-
-To see which versions are supported from your cloud provider use the following
-URL:
-
-::
-
- GET http://169.254.169.254/
- 1.0
- 2007-01-19
- 2007-03-01
- 2007-08-29
- 2007-10-10
- 2007-12-15
- 2008-02-01
- 2008-09-01
- 2009-04-04
- ...
- latest
-
-
-
-Configuration
--------------
-The following configuration can be set for the datasource in system
-configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
-
-The settings that may be configured are:
-
- * **metadata_urls**: This list of urls will be searched for an EC2
- metadata service. The first entry that successfully returns a 200 response
- for <url>/<version>/meta-data/instance-id will be selected.
- (default: ['http://169.254.169.254', 'http://[fd00:ec2::254]',
- 'http://instance-data:8773']).
- * **max_wait**: the maximum amount of clock time in seconds that should be
- spent searching metadata_urls. A value less than zero will result in only
- one request being made, to the first in the list. (default: 120)
- * **timeout**: the timeout value provided to urlopen for each individual http
- request. This is used both when selecting a metadata_url and when crawling
- the metadata service. (default: 50)
- * **apply_full_imds_network_config**: Boolean (default: True) to allow
-   cloud-init to configure any secondary NICs and secondary IPs described by
-   the metadata service. All network interfaces are configured with DHCP (v4)
-   to obtain a primary IPv4 address and route. Interfaces which have a
-   non-empty 'ipv6s' list will also enable DHCPv6 to obtain a primary IPv6
-   address and route. The DHCP response (v4 and v6) returns an IP that matches
-   the first element of the local-ipv4s and ipv6s lists respectively. All
-   additional values (secondary addresses) in the static ip lists will be
-   added to the interface.
-
-An example configuration with the default values is provided below:
-
-.. sourcecode:: yaml
-
- datasource:
- Ec2:
- metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
- max_wait: 120
- timeout: 50
- apply_full_imds_network_config: true
-
-Notes
------
- * There are 2 types of EC2 instances network-wise: VPC ones (Virtual Private
- Cloud) and Classic ones (also known as non-VPC). One major difference
- between them is that Classic instances have their MAC address changed on
- stop/restart operations, so cloud-init will recreate the network config
- file for EC2 Classic instances every boot. On VPC instances this file is
- generated only in the first boot of the instance.
-   The check for the instance type is performed by the is_classic_instance()
-   method.
-
- * For EC2 instances with multiple network interfaces (NICs) attached, dhcp4
- will be enabled to obtain the primary private IPv4 address of those NICs.
- Wherever dhcp4 or dhcp6 is enabled for a NIC, a dhcp route-metric will be
- added with the value of ``<device-number + 1> * 100`` to ensure dhcp
- routes on the primary NIC are preferred to any secondary NICs.
- For example: the primary NIC will have a DHCP route-metric of 100,
- the next NIC will be 200.
-
-.. _EC2 tags user guide: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst
deleted file mode 100644
index 2d2e4544..00000000
--- a/doc/rtd/topics/datasources/exoscale.rst
+++ /dev/null
@@ -1,69 +0,0 @@
-.. _datasource_exoscale:
-
-Exoscale
-========
-
-This datasource supports reading from the metadata server used on the
-`Exoscale platform <https://exoscale.com>`_.
-
-Use of the Exoscale datasource is recommended to benefit from new features of
-the Exoscale platform.
-
-The datasource relies on the availability of a compatible metadata server
-(``http://169.254.169.254`` is used by default) and its companion password
-server, reachable at the same address (by default on port 8080).
-
-Crawling of metadata
---------------------
-
-The metadata service and password server are crawled slightly differently:
-
- * The "metadata service" is crawled every boot.
- * The password server is also crawled every boot (the Exoscale datasource
- forces the password module to run with "frequency always").
-
-In the password server case, the following rules apply in order to enable the
-"restore instance password" functionality:
-
- * If a password is returned by the password server, it is then marked "saved"
- by the cloud-init datasource. Subsequent boots will skip setting the
- password (the password server will return "saved_password").
- * When the instance password is reset (via the Exoscale UI), the password
- server will return the non-empty password at next boot, therefore causing
- cloud-init to reset the instance's password.
-
-Configuration
--------------
-
-Users of this datasource are discouraged from changing the default settings
-unless instructed to by Exoscale support.
-
-The following settings are available and can be set for the
-:ref:`datasource base configuration<datasource_base_config>`
-(in `/etc/cloud/cloud.cfg.d/`).
-
-The settings available are:
-
- * **metadata_url**: The URL for the metadata service (defaults to
- ``http://169.254.169.254``)
- * **api_version**: The API version path on which to query the instance
- metadata (defaults to ``1.0``)
- * **password_server_port**: The port (on the metadata server) on which the
- password server listens (defaults to ``8080``).
- * **timeout**: the timeout value provided to urlopen for each individual http
- request. (defaults to ``10``)
- * **retries**: The number of retries that should be done for an http request
- (defaults to ``6``)
-
-
-An example configuration with the default values is provided below:
-
-.. sourcecode:: yaml
-
- datasource:
- Exoscale:
- metadata_url: "http://169.254.169.254"
- api_version: "1.0"
- password_server_port: 8080
- timeout: 10
- retries: 6
diff --git a/doc/rtd/topics/datasources/fallback.rst b/doc/rtd/topics/datasources/fallback.rst
deleted file mode 100644
index 03658f54..00000000
--- a/doc/rtd/topics/datasources/fallback.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. _datasource_fallback:
-
-Fallback/None
-=============
-
-This is the fallback datasource when no other datasource can be selected. It
-is the equivalent of an empty datasource in that it provides an empty string
-as userdata and an empty dictionary as metadata. It is useful for testing as
-well as for when you do not need an actual datasource to meet your instance
-requirements (i.e. you just want to run modules that are not concerned with
-any external data). It is typically put at the end of the datasource search
-list so that, if no other datasource is matched, this one is used and the
-user is not left with an inaccessible instance.
-
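-For example, a system configuration (a sketch, e.g. placed in
-``/etc/cloud/cloud.cfg.d/``; the exact list of datasources is illustrative)
-that keeps this datasource as the last resort could look like:
-
-.. sourcecode:: yaml
-
-   datasource_list: [ NoCloud, ConfigDrive, OpenStack, Ec2, None ]
-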
-**Note:** the instance id that this datasource provides is
-``iid-datasource-none``.
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/gce.rst b/doc/rtd/topics/datasources/gce.rst
deleted file mode 100644
index 3aeb9afc..00000000
--- a/doc/rtd/topics/datasources/gce.rst
+++ /dev/null
@@ -1,42 +0,0 @@
-.. _datasource_gce:
-
-Google Compute Engine
-=====================
-
-The GCE datasource gets its data from the internal compute metadata server.
-Metadata can be queried at the URL
-'``http://metadata.google.internal/computeMetadata/v1/``'
-from within an instance. For more information see the `GCE metadata docs`_.
-
-Currently the default project and instance level metadata keys
-``project/attributes/sshKeys`` and ``instance/attributes/ssh-keys`` are merged
-to provide ``public-keys``.
-
-``user-data`` and ``user-data-encoding`` can be provided to cloud-init by
-setting those custom metadata keys for an *instance*.
-
-Configuration
--------------
-The following configuration can be set for the datasource in system
-configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
-
-The settings that may be configured are:
-
- * **retries**: The number of retries that should be done for an http request.
- This value is used only after metadata_url is selected. (default: 5)
- * **sec_between_retries**: The amount of wait time between the retries when
- crawling the metadata service. (default: 1)
-
-
-An example configuration with the default values is provided below:
-
-.. sourcecode:: yaml
-
- datasource:
- GCE:
- retries: 5
- sec_between_retries: 1
-
-.. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/lxd.rst b/doc/rtd/topics/datasources/lxd.rst
deleted file mode 100644
index 3b523d50..00000000
--- a/doc/rtd/topics/datasources/lxd.rst
+++ /dev/null
@@ -1,110 +0,0 @@
-.. _datasource_lxd:
-
-LXD
-===
-
-The data source ``LXD`` allows the user to provide custom user-data,
-vendor-data, meta-data and network-config to the instance without running
-a network service (or even without having a network at all). This datasource
-performs HTTP GETs against the `LXD socket device`_ which is provided to each
-running LXD container and VM as ``/dev/lxd/sock`` and represents all
-instance-metadata as versioned HTTP routes such as:
-
- - 1.0/meta-data
- - 1.0/config/user.meta-data
- - 1.0/config/user.vendor-data
- - 1.0/config/user.user-data
- - 1.0/config/user.<any-custom-key>
-
-The LXD socket device ``/dev/lxd/sock`` is only present on containers and VMs
-when the instance configuration has ``security.devlxd=true`` (default).
-Disabling ``security.devlxd`` configuration setting at initial launch will
-ensure that cloud-init uses the :ref:`datasource_nocloud` datasource.
-Disabling ``security.devlxd`` over the life of the container will result in
-warnings from cloud-init and cloud-init will keep the originally detected LXD
-datasource.
-
-The LXD datasource is detected as viable by ``ds-identify`` during systemd
-generator time when either ``/dev/lxd/sock`` exists or
-``/sys/class/dmi/id/board_name`` matches "LXD".
-
-The LXD datasource provides cloud-init the ability to react to meta-data,
-vendor-data, user-data and network-config changes and render the updated
-configuration across a system reboot.
-
-To modify what meta-data, vendor-data or user-data are provided to the
-launched container, use either LXD profiles or
-``lxc launch ... -c <key>="<value>"`` at initial container launch setting one
-of the following keys:
-
- - user.meta-data: YAML metadata which will be appended to base meta-data
- - user.vendor-data: YAML which overrides any meta-data values
- - user.network-config: YAML representing either :ref:`network_config_v1` or
- :ref:`network_config_v2` format
- - user.user-data: YAML which takes preference and overrides both meta-data
- and vendor-data values
- - user.any-key: Custom user configuration key and value pairs can be passed
-   to cloud-init. Those keys/values will be present in instance-data which can
-   be used by both ``## template: jinja`` #cloud-config templates and
-   the ``cloud-init query`` command.
-
-Note: LXD version 4.22 introduced a new scope of config keys prefaced by
-``cloud-init.`` which are preferred above the related ``user.*`` keys:
-
- - cloud-init.meta-data
- - cloud-init.vendor-data
- - cloud-init.network-config
- - cloud-init.user-data
-
-
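-For example, a minimal LXD profile (a sketch, e.g. applied with
-``lxc profile edit``; the user-data contents shown here are illustrative) can
-supply user-data to every instance launched with that profile:
-
-.. code:: yaml
-
-   config:
-     cloud-init.user-data: |
-       #cloud-config
-       packages:
-         - htop
-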
-By default, network configuration from this datasource will be:
-
-.. code:: yaml
-
- version: 1
- config:
- - type: physical
- name: eth0
- subnets:
- - type: dhcp
- control: auto
-
-This datasource is intended to replace the :ref:`datasource_nocloud`
-datasource for LXD instances, providing more direct support for LXD APIs
-instead of static NoCloud seed files.
-
-.. _LXD socket device: https://linuxcontainers.org/lxd/docs/master/dev-lxd
-
-Hotplug
--------
-
-Network hotplug functionality is supported for the LXD datasource as described
-in the :ref:`events` documentation. As hotplug functionality relies on the
-cloud-provided network metadata, the LXD datasource will only meaningfully
-react to a hotplug event if the configuration necessary to respond to the
-change has been provided to LXD. Practically, this means that even with
-hotplug enabled, **the default behavior for adding a new virtual NIC will
-result in no change**.
-
-To update the configuration to be used by hotplug, first pass the network
-configuration via the ``cloud-init.network-config`` key (or
-``user.network-config`` on older versions).
-
-For example, given an LXD instance named ``my-lxd`` with hotplug enabled and
-an LXD bridge named ``my-bridge``, the following will allow for additional
-DHCP configuration of ``eth1``:
-
-.. code-block:: shell-session
-
- $ cat /tmp/cloud-network-config.yaml
- version: 2
- ethernets:
- eth0:
- dhcp4: true
- eth1:
- dhcp4: true
-
- $ lxc config set my-lxd cloud-init.network-config="$(cat /tmp/cloud-network-config.yaml)"
- $ lxc config device add my-lxd eth1 nic name=eth1 nictype=bridged parent=my-bridge
- Device eth1 added to my-lxd
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
deleted file mode 100644
index aedc0f58..00000000
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ /dev/null
@@ -1,161 +0,0 @@
-.. _datasource_nocloud:
-
-NoCloud
-=======
-
-The data source ``NoCloud`` allows the user to provide user-data and meta-data
-to the instance without running a network service (or even without having a
-network at all).
-
-You can provide meta-data and user-data to a local vm boot via files on a
-`vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be
-``cidata`` or ``CIDATA``.
-
-Alternatively, you can provide meta-data via the kernel command line or the
-SMBIOS "serial number" option. The data must be passed in the form of a
-string:
-
-::
-
- ds=nocloud[;key=val;key=val]
-
-or
-
-::
-
- ds=nocloud-net[;key=val;key=val]
-
-The permitted keys are:
-
-- ``h`` or ``local-hostname``
-- ``i`` or ``instance-id``
-- ``s`` or ``seedfrom``
-
-With ``ds=nocloud``, the ``seedfrom`` value must start with ``/`` or
-``file://``. With ``ds=nocloud-net``, the ``seedfrom`` value must start
-with ``http://`` or ``https://``.
-
-e.g. you can pass this option to QEMU:
-
-::
-
- -smbios type=1,serial=ds=nocloud-net;s=http://10.10.0.1:8000/
-
-to cause NoCloud to fetch the full meta-data from http://10.10.0.1:8000/meta-data
-after the network initialization is complete.
-
-The user-data and meta-data files are expected to be found at the following
-paths:
-
-::
-
- /user-data
- /meta-data
-
-Both files are required to be present for it to be considered a valid seed ISO.
-
-The user-data file may contain any user-data format supported by cloud-init,
-while meta-data is a YAML formatted file representing what you'd find in the
-EC2 metadata service.
-
-You may also optionally provide a vendor-data file in the following format.
-
-::
-
- /vendor-data
-
-Given an Ubuntu cloud disk image in 'disk.img', you can create a
-sufficient disk by following the example below.
-
-::
-
- ## 1) create user-data and meta-data files that will be used
- ## to modify image on first boot
- $ echo -e "instance-id: iid-local01\nlocal-hostname: cloudimg" > meta-data
- $ echo -e "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data
-
- ## 2a) create a disk to attach with some user-data and meta-data
- $ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data
-
- ## 2b) alternatively, create a vfat filesystem with same files
- ## $ truncate --size 2M seed.iso
- ## $ mkfs.vfat -n cidata seed.iso
-
- ## 2b) option 1: mount and copy files
- ## $ sudo mount -t vfat seed.iso /mnt
- ## $ sudo cp user-data meta-data /mnt
- ## $ sudo umount /mnt
-
- ## 2b) option 2: the mtools package provides mcopy, which can access vfat
- ## filesystems without mounting them
- ## $ mcopy -oi seed.iso user-data meta-data
-
- ## 3) create a new qcow image to boot, backed by your original image
- $ qemu-img create -f qcow2 -b disk.img -F qcow2 boot-disk.img
-
- ## 4) boot the image and login as 'ubuntu' with password 'passw0rd'
- ## note, passw0rd was set as password through the user-data above,
- ## there is no password set on these images.
- $ kvm -m 256 \
- -net nic -net user,hostfwd=tcp::2222-:22 \
- -drive file=boot-disk.img,if=virtio \
- -drive driver=raw,file=seed.iso,if=virtio
-
-**Note:** the instance-id provided (``iid-local01`` above) is what is used
-to determine if this is "first boot". So if you are making updates to
-user-data you will also have to change the instance-id, or start the disk
-fresh.
-
-Also, you can inject an ``/etc/network/interfaces`` file by providing the
-content for that file in the ``network-interfaces`` field of metadata.
-
-Example metadata:
-
-::
-
- instance-id: iid-abcdefg
- network-interfaces: |
- iface eth0 inet static
- address 192.168.1.10
- network 192.168.1.0
- netmask 255.255.255.0
- broadcast 192.168.1.255
- gateway 192.168.1.254
- hostname: myhost
-
-
-Network configuration can also be provided to cloud-init in either
-:ref:`network_config_v1` or :ref:`network_config_v2` by providing that
-YAML formatted data in a file named ``network-config``. If found,
-this file will override a ``network-interfaces`` file.
-
-See an example below. Note specifically that this file does not
-have a top level ``network`` key as it is already assumed to
-be network configuration based on the filename.
-
-.. code:: yaml
-
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: "52:54:00:12:34:00"
- subnets:
- - type: static
- address: 192.168.1.10
- netmask: 255.255.255.0
- gateway: 192.168.1.254
-
-
-.. code:: yaml
-
-   version: 2
-   ethernets:
-     interface0:
-       match:
-         macaddress: "52:54:00:12:34:00"
-       set-name: interface0
-       addresses:
-         - 192.168.1.10/24
-       gateway4: 192.168.1.254
-
-
-.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
-.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/nwcs.rst b/doc/rtd/topics/datasources/nwcs.rst
deleted file mode 100644
index 2b6543d3..00000000
--- a/doc/rtd/topics/datasources/nwcs.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-.. _datasource_nwcs:
-
-NWCS
-=====
-
-The NWCS datasource retrieves basic configuration values from the locally
-accessible metadata service. All data is served over HTTP from the address
-169.254.169.254.
-
-Configuration
--------------
-
-NWCS' datasource can be configured as follows:
-
-.. sourcecode:: yaml
-
-  datasource:
-    NWCS:
-      url: 'http://169.254.169.254'
-      retries: 3
-      timeout: 2
-      wait: 2
-
-- *url*: The URL used to acquire the metadata configuration from
-- *retries*: Determines the number of times to attempt to connect to the
- metadata service
-- *timeout*: Determines the timeout in seconds to wait for a response from the
- metadata service
-- *wait*: Determines the number of seconds to wait before retrying after a
-  failed request
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
deleted file mode 100644
index 7818507a..00000000
--- a/doc/rtd/topics/datasources/openstack.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _datasource_openstack:
-
-OpenStack
-=========
-
-This datasource supports reading data from the
-`OpenStack Metadata Service
-<https://docs.openstack.org/nova/latest/admin/metadata-service.html>`_.
-
-Discovery
--------------
-To determine whether a platform looks like it may be OpenStack, cloud-init
-checks the following environment attributes to identify a potential OpenStack
-platform:
-
- * Maybe OpenStack if:
-
- * **non-x86 cpu architecture**: because DMI data is buggy on some arches
- * Is OpenStack **if x86 architecture and ANY** of the following:
-
- * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
- * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
- * **DMI chassis_asset_tag** is *HUAWEICLOUD*, *OpenTelekomCloud*,
- *SAP CCloud VM*, *OpenStack Nova* (since 19.2) or
- *OpenStack Compute* (since 19.2)
-
-Configuration
--------------
-The following configuration can be set for the datasource in system
-configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
-
-The settings that may be configured are:
-
- * **metadata_urls**: This list of urls will be searched for an OpenStack
- metadata service. The first entry that successfully returns a 200 response
- for <url>/openstack will be selected. (default: ['http://169.254.169.254'])
- * **max_wait**: the maximum amount of clock time in seconds that should be
- spent searching metadata_urls. A value less than zero will result in only
- one request being made, to the first in the list. (default: -1)
- * **timeout**: the timeout value provided to urlopen for each individual http
- request. This is used both when selecting a metadata_url and when crawling
- the metadata service. (default: 10)
- * **retries**: The number of retries that should be done for an http request.
- This value is used only after metadata_url is selected. (default: 5)
- * **apply_network_config**: A boolean specifying whether to configure the
- network for the instance based on network_data.json provided by the
-   metadata service. When False, only configure DHCP on the primary NIC for
-   this instance. (default: True)
-
-An example configuration with the default values is provided below:
-
-.. sourcecode:: yaml
-
- datasource:
- OpenStack:
- metadata_urls: ["http://169.254.169.254"]
- max_wait: -1
- timeout: 10
- retries: 5
- apply_network_config: True
-
-
-Vendor Data
------------
-
-The OpenStack metadata server can be configured to serve up vendor data
-which is available to all instances for consumption. OpenStack vendor
-data is, generally, a JSON object.
-
-cloud-init will look for configuration in the ``cloud-init`` attribute
-of the vendor data JSON object. cloud-init processes this configuration
-using the same handlers as user data, so any formats that work for user
-data should work for vendor data.
-
-For example, configuring the following as vendor data in OpenStack would
-upgrade packages and install ``htop`` on all instances:
-
-.. sourcecode:: json
-
- {"cloud-init": "#cloud-config\npackage_upgrade: True\npackages:\n - htop"}
-
-For more general information about how cloud-init handles vendor data,
-including how it can be disabled by users on instances, see
-:doc:`/topics/vendordata`.
-
-OpenStack can also be configured to provide 'dynamic vendordata'
-which is provided by the DynamicJSON provider and appears under a
-different metadata path, /vendor_data2.json.
-
-Cloud-init will look for a ``cloud-init`` attribute at the vendor_data2 path;
-if found, its settings are applied after (and hence override) the settings
-from static vendor data. Both sets of vendor data can be overridden by user
-data.
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst
deleted file mode 100644
index 7e480021..00000000
--- a/doc/rtd/topics/datasources/oracle.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-.. _datasource_oracle:
-
-Oracle
-======
-
-This datasource reads metadata, vendor-data and user-data from
-`Oracle Cloud Infrastructure`_ (OCI).
-
-Oracle Platform
----------------
-OCI provides bare metal and virtual machines. In both cases,
-the platform identifies itself via DMI data in the chassis asset tag
-with the string 'OracleCloud.com'.
-
-Oracle's platform provides a metadata service that mimics the 2013-10-17
-version of the OpenStack metadata service. Initially, support for Oracle
-was provided via the OpenStack datasource.
-
-Cloud-init has a specific datasource for Oracle in order to:
- a. allow and support future growth of the OCI platform.
- b. address small differences between OpenStack and Oracle metadata
- implementation.
-
-
-Configuration
--------------
-
-The following configuration can be set for the datasource in system
-configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
-
-The settings that may be configured are:
-
-* **configure_secondary_nics**: A boolean, defaulting to False. If set
- to True on an OCI Virtual Machine, cloud-init will fetch networking
- metadata from Oracle's IMDS and use it to configure the non-primary
- network interface controllers in the system. If set to True on an
- OCI Bare Metal Machine, it will have no effect (though this may
- change in the future).
-
-An example configuration with the default values is provided below:
-
-.. sourcecode:: yaml
-
- datasource:
- Oracle:
- configure_secondary_nics: false
-
-.. _Oracle Cloud Infrastructure: https://cloud.oracle.com/
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
deleted file mode 100644
index d6eb75da..00000000
--- a/doc/rtd/topics/datasources/ovf.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-.. _datasource_ovf:
-
-OVF
-===
-
-The OVF datasource reads data from an `Open Virtualization Format
-<https://en.wikipedia.org/wiki/Open_Virtualization_Format>`_ ISO
-transport.
-
-For further information, see the full working example in cloud-init's
-source code tree in doc/sources/ovf.
-
-Configuration
--------------
-The following configuration can be set for the datasource in system
-configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
-
-The settings that may be configured are:
-
- * disable_vmware_customization: disable or enable the vmware customization
- based on vmware customization files. (default: True)
- * allow_raw_data: enable or disable the vmware customization based on raw
- cloud-init data including metadata and userdata. (default: True)
- * vmware_cust_file_max_wait: the maximum amount of clock time in seconds that
- should be spent waiting for vmware customization files. (default: 15)
-
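-An example configuration with the default values described above (a sketch
-following the format used by the other datasource pages) is:
-
-.. sourcecode:: yaml
-
-   datasource:
-     OVF:
-       disable_vmware_customization: true
-       allow_raw_data: true
-       vmware_cust_file_max_wait: 15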
-
-On VMware platforms, VMTools is required for the OVF datasource configuration
-settings, as well as for vCloud and vSphere admin configuration. Users can
-change the VMTools configuration options with the command::
-
- vmware-toolbox-cmd config set <section> <key> <value>
-
-The following VMTools configuration options affect cloud-init's behavior on a
-booted VM:
-
- * [deploypkg] enable-custom-scripts: If this option is absent in the VMTools
-   configuration, the custom script is disabled by default for security
-   reasons. Some VMware products could change this default behavior (for
-   example: enabled by default) via customization specification settings.
-
-VMware admins can refer to
-https://github.com/canonical/cloud-init/blob/main/cloudinit/sources/helpers/vmware/imc/config.py
-to set the customization specification settings.
-
-For more information on the specific VMTools parameters consumed, see the
-`VMware vSphere Product Documentation
-<https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html>`_.
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst
deleted file mode 100644
index cbbf6900..00000000
--- a/doc/rtd/topics/datasources/rbxcloud.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-.. _datasource_rbx:
-
-Rbx Cloud
-=========
-
-The Rbx datasource consumes the metadata drive available on the `HyperOne`_
-and `Rootbox`_ platforms.
-
-The datasource supports, in particular, network configuration, hostname,
-user accounts and user metadata.
-
-Metadata drive
---------------
-
-Drive metadata is a `FAT`_-formatted partition with the ``CLOUDMD`` or
-``cloudmd`` label on the system disk. Its contents are refreshed each time
-the virtual machine is restarted, if the partition exists. For more
-information see `HyperOne Virtual Machine docs`_.
-
-.. _HyperOne: http://www.hyperone.com/
-.. _Rootbox: https://rootbox.com/
-.. _HyperOne Virtual Machine docs: http://www.hyperone.com/
-.. _FAT: https://en.wikipedia.org/wiki/File_Allocation_Table
diff --git a/doc/rtd/topics/datasources/smartos.rst b/doc/rtd/topics/datasources/smartos.rst
deleted file mode 100644
index 6fe45c73..00000000
--- a/doc/rtd/topics/datasources/smartos.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-.. _datasource_smartos:
-
-SmartOS Datasource
-==================
-
-This datasource finds metadata and user-data from the SmartOS virtualization
-platform (i.e. Joyent).
-
-Please see http://smartos.org/ for information about SmartOS.
-
-SmartOS Platform
-----------------
-The SmartOS virtualization platform provides meta-data to the instance via
-the second serial console. On Linux, this is /dev/ttyS1. The data is provided
-via a simple protocol: something queries for the data, the console responds
-with a status and, if "SUCCESS", returns the data until a single ".\n".
-
-New versions of the SmartOS tooling will include support for base64 encoded
-data.
-
-Meta-data channels
-------------------
-
-Cloud-init supports three modes of delivering user/meta-data via the flexible
-channels of SmartOS.
-
-* user-data is written to /var/db/user-data
-
- - per the spec, user-data is for consumption by the end-user, not
- provisioning tools
- - cloud-init entirely ignores this channel other than writing it to disk
- - removal of the meta-data key means that /var/db/user-data gets removed
- - a backup of previous meta-data is maintained as
- /var/db/user-data.<timestamp>. <timestamp> is the epoch time when
- cloud-init ran
-
-* user-script is written to /var/lib/cloud/scripts/per-boot/99_user_data
-
- - this is executed each boot
- - a link is created to /var/db/user-script
-  - previous versions of the user-script are written to
- /var/lib/cloud/scripts/per-boot.backup/99_user_script.<timestamp>.
- - <timestamp> is the epoch time when cloud-init ran.
- - when the 'user-script' meta-data key goes missing, the user-script is
- removed from the file system, although a backup is maintained.
-  - if the script does not start with a shebang (i.e. starts with
-    #!<executable>) or is not executable, cloud-init will add a
-    shebang of "#!/bin/bash"
-
-* cloud-init:user-data is treated like on other Clouds.
-
- - this channel is used for delivering _all_ cloud-init instructions
- - scripts delivered over this channel must be well formed (i.e. must have
- a shebang)
-
-Cloud-init supports reading the traditional meta-data fields supported by the
-SmartOS tools. These are:
-
- * root_authorized_keys
- * hostname
- * enable_motd_sys_info
- * iptables_disable
-
-Note: At this time iptables_disable and enable_motd_sys_info are read but
- are not actioned.
-
-Disabling user-script
----------------------
-
-Cloud-init uses the per-boot script functionality to handle the execution
-of the user-script. If you want to prevent this, use a cloud-config of:
-
-.. code:: yaml
-
- #cloud-config
- cloud_final_modules:
- - scripts-per-once
- - scripts-per-instance
- - scripts-user
- - ssh-authkey-fingerprints
- - keys-to-console
- - phone-home
- - final-message
- - power-state-change
-
-Alternatively, you can use the JSON patch method:
-
-.. code:: yaml
-
- #cloud-config-jsonp
- [
- { "op": "replace",
- "path": "/cloud_final_modules",
- "value": ["scripts-per-once",
- "scripts-per-instance",
- "scripts-user",
- "ssh-authkey-fingerprints",
- "keys-to-console",
- "phone-home",
- "final-message",
- "power-state-change"]
- }
- ]
-
-The default cloud_final_modules list includes "scripts-per-boot". When you
-disable the per-boot script handling, cloud-init will still ingest and write
-the user-data but will not execute it.
-
-Note: Unless you have an explicit use-case, it is recommended that you not
- disable the per-boot script execution, especially if you are using
- any of the life-cycle management features of SmartOS.
-
-The cloud-config needs to be delivered over the cloud-init:user-data channel
-in order for cloud-init to ingest it.
-
-base64
-------
-
-The following are exempt from base64 encoding, owing to the fact that they
-are provided by SmartOS:
-
- * root_authorized_keys
- * enable_motd_sys_info
- * iptables_disable
- * user-data
- * user-script
-
-This list can be changed through
-:ref:`datasource base configuration<datasource_base_config>` variable
-'no_base64_decode'.
-
-This means that user-script and user-data as well as other values can be
-base64 encoded. Since Cloud-init can only guess as to whether or not something
-is truly base64 encoded, the following meta-data keys are hints as to whether
-or not to base64 decode something:
-
- * base64_all: Except for excluded keys, attempt to base64 decode
-   the values. If a value fails to decode properly, it will be
-   returned as text.
- * base64_keys: A comma delimited list of which keys are base64 encoded.
- * b64-<key>:
-   for any key, if there exists an entry in the metadata for 'b64-<key>',
-   then 'b64-<key>' is expected to be a plaintext boolean indicating whether
-   or not its value is encoded.
- * no_base64_decode: This is a configuration setting
- (i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be
- base64 decoded.
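-
-For example, the default exclusion list can be overridden in the datasource
-configuration (a sketch, e.g. in ``/etc/cloud/cloud.cfg.d/``; the key list
-shown is illustrative):
-
-.. code:: yaml
-
-   datasource:
-     SmartOS:
-       no_base64_decode: ['root_authorized_keys', 'hostname', 'user-data']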
-
-disk_aliases and ephemeral disk
--------------------------------
-By default, SmartOS only supports a single ephemeral disk. That disk is
-completely empty (un-partitioned with no filesystem).
-
-The SmartOS datasource has built-in cloud-config which instructs the
-'disk_setup' module to partition and format the ephemeral disk.
-
-You can then control the disk_setup in 2 ways:
-
- 1. through the datasource config, you can change the 'alias' of
-    ephemeral0 to reference another device (see the example below). The
-    default is:
-
-       'disk_aliases': {'ephemeral0': '/dev/vdb'},
-
-    which means that anywhere disk_setup sees a device named 'ephemeral0',
-    /dev/vdb will be substituted.
- 2. you can provide disk_setup or fs_setup data in user-data to overwrite
-    the datasource's built-in values.
-
-See doc/examples/cloud-config-disk-setup.txt for information on disk_setup.
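-
-For example, pointing ``ephemeral0`` at a different device through the
-datasource configuration (a sketch, e.g. in ``/etc/cloud/cloud.cfg.d/``)
-could look like:
-
-.. code:: yaml
-
-   datasource:
-     SmartOS:
-       disk_aliases:
-         ephemeral0: /dev/vdb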
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/upcloud.rst b/doc/rtd/topics/datasources/upcloud.rst
deleted file mode 100644
index 75f438ee..00000000
--- a/doc/rtd/topics/datasources/upcloud.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-.. _datasource_upcloud:
-
-UpCloud
-=============
-
-The `UpCloud`_ datasource consumes information from UpCloud's `metadata
-service`_. This metadata service serves information about the
-running server via HTTP over the address 169.254.169.254 available in every
-DHCP-configured interface. The metadata API endpoints are fully described in
-UpCloud API documentation at
-`https://developers.upcloud.com/1.3/8-servers/#metadata-service
-<https://developers.upcloud.com/1.3/8-servers/#metadata-service>`_.
-
-Providing user-data
--------------------
-
-When creating a server, user-data is provided by specifying it as `user_data`
-in the API or via the server creation tool in the control panel. User-data is
-immutable during the server's lifetime and can be removed by deleting the
-server.
-
-.. _UpCloud: https://upcloud.com/
-.. _metadata service: https://upcloud.com/community/tutorials/upcloud-metadata-service/
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst
deleted file mode 100644
index de3de6af..00000000
--- a/doc/rtd/topics/datasources/vmware.rst
+++ /dev/null
@@ -1,358 +0,0 @@
-.. _datasource_vmware:
-
-VMware
-======
-
-This datasource is for use with systems running on a VMware platform such as
-vSphere and currently supports the following data transports:
-
-
-* `GuestInfo <https://github.com/vmware/govmomi/blob/master/govc/USAGE.md>`_ keys
-
-Configuration
--------------
-
-The configuration method is dependent upon the transport:
-
-GuestInfo Keys
-^^^^^^^^^^^^^^
-
-One method of providing meta, user, and vendor data is by setting the following
-key/value pairs on a VM's ``extraConfig`` `property <https://vdc-repo.vmware.com/vmwb-repository/dcr-public/723e7f8b-4f21-448b-a830-5f22fd931b01/5a8257bd-7f41-4423-9a73-03307535bd42/doc/vim.vm.ConfigInfo.html>`_:
-
-.. list-table::
- :header-rows: 1
-
- * - Property
- - Description
- * - ``guestinfo.metadata``
- - A YAML or JSON document containing the cloud-init metadata.
- * - ``guestinfo.metadata.encoding``
- - The encoding type for ``guestinfo.metadata``.
- * - ``guestinfo.userdata``
- - A YAML document containing the cloud-init user data.
- * - ``guestinfo.userdata.encoding``
- - The encoding type for ``guestinfo.userdata``.
- * - ``guestinfo.vendordata``
- - A YAML document containing the cloud-init vendor data.
- * - ``guestinfo.vendordata.encoding``
- - The encoding type for ``guestinfo.vendordata``.
-
-
-All ``guestinfo.*.encoding`` values may be set to ``base64`` or
-``gzip+base64``.
-
-Features
---------
-
-This section reviews several features available in this datasource, regardless
-of how the meta, user, and vendor data was discovered.
-
-Instance data and lazy networks
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-One of the hallmarks of cloud-init is `its use of instance-data and JINJA
-queries <../instancedata.html#using-instance-data>`_
--- the ability to write queries in user and vendor data that reference runtime
-information present in ``/run/cloud-init/instance-data.json``. This works well
-when the metadata provides all of the information up front, such as the network
-configuration. For systems that rely on DHCP, however, this information may not
-be available when the metadata is persisted to disk.
-
-This datasource ensures that even if the instance is using DHCP to configure
-networking, the same details about the configured network are available in
-``/run/cloud-init/instance-data.json`` as if static networking was used. This
-information collected at runtime is easy to demonstrate by executing the
-datasource on the command line. From the root of this repository, run the
-following command:
-
-.. code-block:: bash
-
- PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py
-
-The above command will result in output similar to the below JSON:
-
-.. code-block:: json
-
- {
- "hostname": "akutz.localhost",
- "local-hostname": "akutz.localhost",
- "local-ipv4": "192.168.0.188",
- "local_hostname": "akutz.localhost",
- "network": {
- "config": {
- "dhcp": true
- },
- "interfaces": {
- "by-ipv4": {
- "172.0.0.2": {
- "netmask": "255.255.255.255",
- "peer": "172.0.0.2"
- },
- "192.168.0.188": {
- "broadcast": "192.168.0.255",
- "mac": "64:4b:f0:18:9a:21",
- "netmask": "255.255.255.0"
- }
- },
- "by-ipv6": {
- "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": {
- "flags": 208,
- "mac": "64:4b:f0:18:9a:21",
- "netmask": "ffff:ffff:ffff:ffff::/64"
- }
- },
- "by-mac": {
- "64:4b:f0:18:9a:21": {
- "ipv4": [
- {
- "addr": "192.168.0.188",
- "broadcast": "192.168.0.255",
- "netmask": "255.255.255.0"
- }
- ],
- "ipv6": [
- {
- "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2",
- "flags": 208,
- "netmask": "ffff:ffff:ffff:ffff::/64"
- }
- ]
- },
- "ac:de:48:00:11:22": {
- "ipv6": []
- }
- }
- }
- },
- "wait-on-network": {
- "ipv4": true,
- "ipv6": "false"
- }
- }
-
-
-Redacting sensitive information
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Sometimes the cloud-init userdata might contain sensitive information, and it
-may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo
-keys) redacted as soon as its data is read by the datasource. This is possible
-by adding the following to the metadata:
-
-.. code-block:: yaml
-
- redact: # formerly named cleanup-guestinfo, which will also work
- - userdata
- - vendordata
-
-When the above snippet is added to the metadata, the datasource will iterate
-over the elements in the ``redact`` array and clear each of the keys. For
-example, when the guestinfo transport is used, the above snippet will cause
-the following commands to be executed:
-
-.. code-block:: shell
-
- vmware-rpctool "info-set guestinfo.userdata ---"
- vmware-rpctool "info-set guestinfo.userdata.encoding "
- vmware-rpctool "info-set guestinfo.vendordata ---"
- vmware-rpctool "info-set guestinfo.vendordata.encoding "
-
-Please note that keys are set to the valid YAML string ``---`` as it is not
-possible to remove an existing key from the guestinfo key-space. A key's
-analogous encoding property will be set to a single white-space character,
-causing the datasource to treat the actual key value as plain-text, thereby
-loading it as an empty YAML doc (hence the aforementioned ``---``\ ).
-
-Reading the local IP addresses
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-This datasource automatically discovers the local IPv4 and IPv6 addresses for
-a guest operating system based on the default routes. However, when inspecting
-a VM externally, it's not possible to know what the *default* IP address is for
-the guest OS. That's why this datasource sets the discovered, local IPv4 and
-IPv6 addresses back in the guestinfo namespace as the following keys:
-
-
-* ``guestinfo.local-ipv4``
-* ``guestinfo.local-ipv6``
-
-It is possible that a host may not have any default, local IP addresses. It's
-also possible the reported, local addresses are link-local addresses. But these
-two keys may be used to discover what this datasource determined were the local
-IPv4 and IPv6 addresses for a host.
-
-Waiting on the network
-^^^^^^^^^^^^^^^^^^^^^^
-
-Sometimes cloud-init may bring up the network, but it will not finish coming
-online before the datasource's ``setup`` function is called, resulting in a
-``/var/run/cloud-init/instance-data.json`` file that does not have the correct
-network information. It is possible to instruct the datasource to wait until an
-IPv4 or IPv6 address is available before writing the instance data with the
-following metadata properties:
-
-.. code-block:: yaml
-
- wait-on-network:
- ipv4: true
- ipv6: true
-
-If either of the above values are true, then the datasource will sleep for a
-second, check the network status, and repeat until one or both addresses from
-the specified families are available.
-
-Walkthrough
------------
-
-The following series of steps is a demonstration of how to configure a VM with
-this datasource:
-
-
-#. Create the metadata file for the VM. Save the following YAML to a file named
- ``metadata.yaml``\ :
-
- .. code-block:: yaml
-
- instance-id: cloud-vm
- local-hostname: cloud-vm
- network:
- version: 2
- ethernets:
- nics:
- match:
- name: ens*
- dhcp4: yes
-
-#. Create the userdata file ``userdata.yaml``\ :
-
- .. code-block:: yaml
-
- #cloud-config
-
- users:
- - default
- - name: akutz
- primary_group: akutz
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: sudo, wheel
- lock_passwd: true
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com
-
-#. Please note this step requires that the VM be powered off. All of the
- commands below use the VMware CLI tool, `govc <https://github.com/vmware/govmomi/blob/master/govc>`_.
-
- Go ahead and assign the path to the VM to the environment variable ``VM``\ :
-
- .. code-block:: shell
-
- export VM="/inventory/path/to/the/vm"
-
-#. Power off the VM:
-
- .. raw:: html
-
- <hr />
-
- &#x26a0;&#xfe0f; <strong>First Boot Mode</strong>
-
- To ensure the next power-on operation results in a first-boot scenario for
- cloud-init, it may be necessary to run the following command just before
- powering off the VM:
-
- .. code-block:: bash
-
- cloud-init clean --logs --machine-id
-
- Otherwise cloud-init may not run in first-boot mode. For more information
- on how the boot mode is determined, please see the
- `First Boot Documentation <../boot.html#first-boot-determination>`_.
-
- .. raw:: html
-
- <hr />
-
- .. code-block:: shell
-
- govc vm.power -off "${VM}"
-
-#.
- Export the environment variables that contain the cloud-init metadata and
- userdata:
-
- .. code-block:: shell
-
- export METADATA=$(gzip -c9 <metadata.yaml | { base64 -w0 2>/dev/null || base64; }) \
- USERDATA=$(gzip -c9 <userdata.yaml | { base64 -w0 2>/dev/null || base64; })
-
-#.
- Assign the metadata and userdata to the VM:
-
- .. code-block:: shell
-
- govc vm.change -vm "${VM}" \
- -e guestinfo.metadata="${METADATA}" \
- -e guestinfo.metadata.encoding="gzip+base64" \
- -e guestinfo.userdata="${USERDATA}" \
- -e guestinfo.userdata.encoding="gzip+base64"
-
- Please note the above commands include specifying the encoding for the
- properties. This is important as it informs the datasource how to decode
- the data for cloud-init. Valid values for ``metadata.encoding`` and
- ``userdata.encoding`` include:
-
-
- * ``base64``
- * ``gzip+base64``
-
-#.
- Power on the VM:
-
- .. code-block:: shell
-
- govc vm.power -on "${VM}"
-
-If all went according to plan, the guest VM is:
-
-* Locked down, allowing SSH access only for the user in the userdata
-* Configured for a dynamic IP address via DHCP
-* Has a hostname of ``cloud-vm``
-
-Examples
---------
-
-This section reviews common configurations:
-
-Setting the hostname
-^^^^^^^^^^^^^^^^^^^^
-
-The hostname is set by way of the metadata key ``local-hostname``.
-
-Setting the instance ID
-^^^^^^^^^^^^^^^^^^^^^^^
-
-The instance ID may be set by way of the metadata key ``instance-id``. However,
-if this value is absent then the instance ID is read from the file
-``/sys/class/dmi/id/product_uuid``.
-
-Providing public SSH keys
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The public SSH keys may be set by way of the metadata key ``public-keys-data``.
-Each newline-terminated string will be interpreted as a separate SSH public
-key, which will be placed in the distro's default user's
-``~/.ssh/authorized_keys``. If the value is empty or absent, then nothing will
-be written to ``~/.ssh/authorized_keys``.
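-
-Putting these metadata keys together, a minimal metadata document (a sketch;
-the key material shown is a placeholder) might look like:
-
-.. code-block:: yaml
-
-   instance-id: cloud-vm
-   local-hostname: cloud-vm
-   public-keys-data: |
-     ssh-ed25519 AAAA...placeholder-key... user@example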
-
-Configuring the network
-^^^^^^^^^^^^^^^^^^^^^^^
-
-The network is configured by setting the metadata key ``network`` with a value
-consistent with Network Config Versions
-`1 <../network-config-format-v1.html>`_ or
-`2 <../network-config-format-v2.html>`_\ , depending on the Linux
-distro's version of cloud-init.
-
-The metadata key ``network.encoding`` may be used to indicate the format of
-the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``.
diff --git a/doc/rtd/topics/datasources/vultr.rst b/doc/rtd/topics/datasources/vultr.rst
deleted file mode 100644
index f8601700..00000000
--- a/doc/rtd/topics/datasources/vultr.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-.. _datasource_vultr:
-
-Vultr
-=====
-
-The `Vultr`_ datasource retrieves basic configuration values from the locally
-accessible `metadata service`_. All data is served over HTTP from the address
-169.254.169.254. The endpoints are documented in
-`https://www.vultr.com/metadata/
-<https://www.vultr.com/metadata/>`_
-
-Configuration
--------------
-
-Vultr's datasource can be configured as follows:
-
-.. sourcecode:: yaml
-
-  datasource:
-    Vultr:
-      url: 'http://169.254.169.254'
-      retries: 3
-      timeout: 2
-      wait: 2
-
-- *url*: The URL used to acquire the metadata configuration from
-- *retries*: Determines the number of times to attempt to connect to the
- metadata service
-- *timeout*: Determines the timeout in seconds to wait for a response from the
- metadata service
-- *wait*: Determines the number of seconds to wait before retrying after a
-  failed request
-
-.. _Vultr: https://www.vultr.com/
-.. _metadata service: https://www.vultr.com/metadata/
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/zstack.rst b/doc/rtd/topics/datasources/zstack.rst
deleted file mode 100644
index 6630ad9f..00000000
--- a/doc/rtd/topics/datasources/zstack.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-.. _datasource_zstack:
-
-ZStack
-======
-The ZStack platform provides an AWS EC2-compatible metadata service, but with
-a different datasource identity.
-More information about ZStack can be found at `ZStack <https://www.zstack.io>`__.
-
-Discovery
----------
-To determine whether a VM is running on the ZStack platform, cloud-init checks
-DMI information via 'dmidecode -s chassis-asset-tag'; if the output ends with
-'.zstack.io', it is running on the ZStack platform.
-
-
-Metadata
-^^^^^^^^
-Same as EC2, instance metadata can be queried at
-
-::
-
- GET http://169.254.169.254/2009-04-04/meta-data/
- instance-id
- local-hostname
-
-Userdata
-^^^^^^^^
-Same as EC2, instance userdata can be queried at
-
-::
-
- GET http://169.254.169.254/2009-04-04/user-data/
- meta_data.json
- user_data
- password
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
deleted file mode 100644
index 23ef0dfe..00000000
--- a/doc/rtd/topics/debugging.rst
+++ /dev/null
@@ -1,265 +0,0 @@
-********************
-Debugging cloud-init
-********************
-
-Overview
-========
-This topic will discuss general approaches for testing and debugging
-cloud-init on deployed instances.
-
-.. _boot_time_analysis:
-
-Boot Time Analysis - cloud-init analyze
-=======================================
-Occasionally instances don't appear as performant as we would like and
-cloud-init packages a simple facility to inspect what operations took
-cloud-init the longest during boot and setup.
-
-The script **/usr/bin/cloud-init** has an **analyze** sub-command
-which parses any cloud-init.log file into formatted and sorted events. It
-allows for detailed analysis of the most costly cloud-init operations to
-determine the long-pole in cloud-init configuration and setup. These
-subcommands default to reading /var/log/cloud-init.log.
-
-* ``analyze show`` Parse and organize cloud-init.log events by stage and
- include each sub-stage granularity with time delta reports.
-
-.. code-block:: shell-session
-
- $ cloud-init analyze show -i my-cloud-init.log
- -- Boot Record 01 --
- The total time elapsed since completing an event is printed after the "@"
- character.
- The time the event takes is printed after the "+" character.
-
- Starting stage: modules-config
- |`->config-snap_config ran successfully @05.47700s +00.00100s
- |`->config-ssh-import-id ran successfully @05.47800s +00.00200s
- |`->config-locale ran successfully @05.48000s +00.00100s
- ...
-
-
-* ``analyze dump`` Parse cloud-init.log into event records and return a list of
- dictionaries that can be consumed for other reporting needs.
-
-.. code-block:: shell-session
-
- $ cloud-init analyze dump -i my-cloud-init.log
- [
- {
- "description": "running config modules",
- "event_type": "start",
- "name": "modules-config",
- "origin": "cloudinit",
- "timestamp": 1510807493.0
- },...
-
-* ``analyze blame`` Parse cloud-init.log into event records and sort them based
- on highest time cost for quick assessment of areas of cloud-init that may
- need improvement.
-
-.. code-block:: shell-session
-
- $ cloud-init analyze blame -i my-cloud-init.log
- -- Boot Record 11 --
- 00.01300s (modules-final/config-scripts-per-boot)
- 00.00400s (modules-final/config-final-message)
- 00.00100s (modules-final/config-rightscale_userdata)
- ...
-
-* ``analyze boot`` Make subprocess calls to the kernel in order to get relevant
- pre-cloud-init timestamps, such as the kernel start, kernel finish boot, and cloud-init start.
-
-.. code-block:: shell-session
-
- $ cloud-init analyze boot
- -- Most Recent Boot Record --
- Kernel Started at: 2019-06-13 15:59:55.809385
- Kernel ended boot at: 2019-06-13 16:00:00.944740
- Kernel time to boot (seconds): 5.135355
- Cloud-init start: 2019-06-13 16:00:05.738396
- Time between Kernel boot and Cloud-init start (seconds): 4.793656
-
-
-Analyze quickstart - LXC
----------------------------
-To quickly obtain a cloud-init log, try using LXC on any Ubuntu system:
-
-.. code-block:: shell-session
-
- $ lxc init ubuntu-daily:focal x1
- $ lxc start x1
- $ # Take lxc's cloud-init.log and pipe it to the analyzer
- $ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i -
- $ lxc file pull x1/var/log/cloud-init.log - | \
- python3 -m cloudinit.analyze dump -i -
-
-
-Analyze quickstart - KVM
----------------------------
-To quickly analyze a cloud-init log from a KVM instance:
-
-1. Download the current cloud image
-
-.. code-block:: shell-session
-
- $ wget https://cloud-images.ubuntu.com/daily/server/focal/current/focal-server-cloudimg-amd64.img
-
-2. Create a snapshot image to preserve the original cloud-image
-
-.. code-block:: shell-session
-
- $ qemu-img create -b focal-server-cloudimg-amd64.img -f qcow2 \
- test-cloudinit.qcow2
-
-3. Create a seed image with metadata using `cloud-localds`
-
-.. code-block:: shell-session
-
- $ cat > user-data <<EOF
- #cloud-config
- password: passw0rd
- chpasswd: { expire: False }
- EOF
- $ cloud-localds my-seed.img user-data
-
-4. Launch your modified VM
-
-.. code-block:: shell-session
-
- $ kvm -m 512 -net nic -net user -redir tcp:2222::22 \
- -drive file=test-cloudinit.qcow2,if=virtio,format=qcow2 \
- -drive file=my-seed.img,if=virtio,format=raw
-
-5. Analyze the boot (blame, dump, show)
-
-.. code-block:: shell-session
-
- $ ssh -p 2222 ubuntu@localhost 'cat /var/log/cloud-init.log' | \
- cloud-init analyze blame -i -
-
-
-Running single cloud config modules
-===================================
-This subcommand is not called by the init system. It can be called manually to
-load the configured datasource and run a single cloud-config module once using
-the cached userdata and metadata after the instance has booted. Each
-cloud-config module has a module FREQUENCY configured: PER_INSTANCE, PER_BOOT,
-PER_ONCE or PER_ALWAYS. When a module is run by cloud-init, it stores a
-semaphore file in
-``/var/lib/cloud/instance/sem/config_<module_name>.<frequency>`` which marks
-when the module last successfully ran. Presence of this semaphore file
-prevents a module from running again if it has already been run. To ensure that
-a module is run again, the desired frequency can be overridden on the
-commandline:
-
-.. code-block:: shell-session
-
- $ sudo cloud-init single --name cc_ssh --frequency always
- ...
- Generating public/private ed25519 key pair
- ...
-
-Inspect cloud-init.log for output of what operations were performed as a
-result.
-
-.. _proposed_sru_testing:
-
-Stable Release Updates (SRU) testing for cloud-init
-===================================================
-Once an Ubuntu release is stable (i.e. after it is released), updates for it
-must follow a special procedure called a "stable release update" (or `SRU`_).
-
-The cloud-init project has a specific process it follows when validating
-a cloud-init SRU, documented in the `CloudinitUpdates`_ wiki page.
-
-Generally an SRU test of cloud-init performs the following:
-
- * Install a pre-release version of cloud-init from the
- **-proposed** APT pocket (e.g. **bionic-proposed**)
- * Upgrade cloud-init and attempt a clean run of cloud-init to assert the new
-   version of cloud-init works properly on the specific platform and Ubuntu
-   series
- * Check for tracebacks or errors in behavior
-
-
-Manual SRU verification procedure
----------------------------------
-Below are steps to manually test a pre-release version of cloud-init
-from **-proposed**.
-
-.. note::
- For each Ubuntu SRU, the Ubuntu Server team manually validates the new version of cloud-init
- on these platforms: **Amazon EC2, Azure, GCE, OpenStack, Oracle,
- Softlayer (IBM), LXD, KVM**
-
-1. Launch a VM on your favorite platform, providing this cloud-config
- user-data and replacing `<YOUR_LAUNCHPAD_USERNAME>` with your username:
-
-.. code-block:: yaml
-
- ## template: jinja
- #cloud-config
- ssh_import_id: [<YOUR_LAUNCHPAD_USERNAME>]
- hostname: SRU-worked-{{v1.cloud_name}}
-
-2. Wait for current cloud-init to complete, replace `<YOUR_VM_IP>` with the IP
- address of the VM that you launched in step 1:
-
-.. code-block:: bash
-
- CI_VM_IP=<YOUR_VM_IP>
- # Make note of the datasource cloud-init detected in --long output.
- # In step 5, you will use this to confirm the same datasource is detected after upgrade.
- ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long
-
-3. Set up the **-proposed** pocket on your VM and upgrade to the **-proposed**
- cloud-init:
-
-.. code-block:: bash
-
- # Create a script that will add the -proposed pocket to APT's sources
- # and install cloud-init from that pocket
- cat > setup_proposed.sh <<EOF
-   #!/bin/bash
- mirror=http://archive.ubuntu.com/ubuntu
- echo deb \$mirror \$(lsb_release -sc)-proposed main | tee \
- /etc/apt/sources.list.d/proposed.list
- apt-get update -q
- apt-get install -qy cloud-init
- EOF
-
- scp setup_proposed.sh ubuntu@$CI_VM_IP:.
- ssh ubuntu@$CI_VM_IP -- sudo bash setup_proposed.sh
-
-4. Change hostname, clean cloud-init's state, and reboot to run cloud-init
- from scratch:
-
-.. code-block:: bash
-
- ssh ubuntu@$CI_VM_IP -- sudo hostname something-else
- ssh ubuntu@$CI_VM_IP -- sudo cloud-init clean --logs --reboot
-
-5. Validate **-proposed** cloud-init came up without error
-
-.. code-block:: bash
-
- # Block until cloud-init completes and verify from --long the datasource
- # from step 1. Errors would show up in --long
-
- ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long
- # Make sure hostname was set properly to SRU-worked-<cloud name>
- ssh ubuntu@$CI_VM_IP -- hostname
- # Check for any errors or warnings in cloud-init logs.
- # (This should produce no output if successful.)
- ssh ubuntu@$CI_VM_IP -- grep Trace "/var/log/cloud-init*"
-
-6. If you encounter an error during SRU testing:
-
- * Create a `new cloud-init bug`_ reporting the version of cloud-init
- affected
- * Ping upstream cloud-init on Libera's `#cloud-init IRC channel`_
-
-.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates
-.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates
-.. _new cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
-.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
diff --git a/doc/rtd/topics/docs.rst b/doc/rtd/topics/docs.rst
deleted file mode 100644
index 99c2c140..00000000
--- a/doc/rtd/topics/docs.rst
+++ /dev/null
@@ -1,76 +0,0 @@
-.. _docs:
-
-Docs
-****
-
-These docs are hosted on Read the Docs. The following will explain how to
-contribute to and build these docs locally.
-
-The documentation is primarily written in reStructuredText.
-
-
-Building
-========
-
-There is a makefile target to build the documentation for you:
-
-.. code-block:: shell-session
-
- $ tox -e doc
-
-This will do two things:
-
-- Build the documentation using sphinx
-- Run doc8 against the documentation source code
-
-Once built, the HTML files will be viewable in ``doc/rtd_html``. Use your
-web browser to open ``index.html`` to view and navigate the site.
-
-Style Guide
-===========
-
-Headings
---------
-The headings used across the documentation use the following hierarchy:
-
-- ``*****``: used once atop of a new page
-- ``=====``: each section on the page
-- ``-----``: subsections
-- ``^^^^^``: sub-subsections
-- ``"""""``: paragraphs
-
-The top level header ``######`` is reserved for the first page.
-
-If an underline and overline are used, their lengths must be identical. The
-underline must be at least as long as the title itself.
-
-Line Length
------------
-Please keep the line lengths to a maximum of **79** characters. This ensures
-that the pages and tables do not get so wide that side scrolling is required.
-
-Header
-------
-Adding a link at the top of the page allows for the page to be referenced by
-other pages. For example, for the FAQ page this would be:
-
-.. code-block:: rst
-
- .. _faq:
-
-Vertical Whitespace
--------------------
-One newline between each section helps ensure readability of the documentation
-source code.
-
-Common Words
-------------
-There are some common words that should follow specific usage:
-
-- ``cloud-init``: always lower case with a hyphen, unless starting a sentence
- in which case only the 'C' is capitalized (e.g. ``Cloud-init``).
-- ``metadata``: one word
-- ``user data``: two words, not to be combined
-- ``vendor data``: like user data, it is two words
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst
deleted file mode 100644
index 1a562fb4..00000000
--- a/doc/rtd/topics/events.rst
+++ /dev/null
@@ -1,95 +0,0 @@
-.. _events:
-
-******************
-Events and Updates
-******************
-
-Events
-======
-
-`Cloud-init`_ will fetch and apply cloud and user data configuration
-upon several event types. The two most common events for cloud-init
-are when an instance first boots and any subsequent boot thereafter (reboot).
-In addition to boot events, cloud-init users and vendors are interested
-in when devices are added. cloud-init currently supports the following
-event types:
-
-- **BOOT_NEW_INSTANCE**: New instance first boot
-- **BOOT**: Any system boot other than 'BOOT_NEW_INSTANCE'
-- **BOOT_LEGACY**: Similar to 'BOOT', but applies networking config twice each
- boot: once during the Local stage, then again in the Network stage. As this
- was previously the default behavior, this option exists to prevent
- regressions.
-- **HOTPLUG**: Dynamic add of a system device
-
-Future work will likely include infrastructure and support for the following
-events:
-
-- **METADATA_CHANGE**: An instance's metadata has changed
-- **USER_REQUEST**: Directed request to update
-
-Datasource Event Support
-========================
-
-All :ref:`datasources` by default support the ``BOOT_NEW_INSTANCE`` event.
-Each Datasource will declare a set of these events that it is capable of
-handling. Datasources may not support all event types. In some cases a system
-may be configured to allow a particular event but may be running on
-a platform whose datasource cannot support the event.
-
-Configuring Event Updates
-=========================
-
-Update configuration may be specified via user data,
-which can be used to enable or disable handling of specific events.
-This configuration will be honored as long as the events are supported by
-the datasource. However, configuration will always be applied at first
-boot, regardless of the user data specified.
-
-Updates
-~~~~~~~
-Update policy configuration defines which
-events are allowed to be handled. This is separate from whether a
-particular platform or datasource has the capability for such events.
-
-**scope**: *<name of the scope for event policy>*
-
-The ``scope`` value is a string that defines the domain in which the event
-occurs. Currently the only known scope is ``network``, though more scopes may
-be added in the future. Scopes are defined by convention, but arbitrary values
-can be used.
-
-**when**: *<list of events to handle for a particular scope>*
-
-Each ``scope`` requires a ``when`` element to specify which events
-are allowed to be handled.
-
-Hotplug
-=======
-When the hotplug event is supported by the datasource and configured in
-user data (see the example below), cloud-init will respond to the addition or
-removal of network interfaces to or from the system. In addition to fetching
-and updating the system metadata, cloud-init will also bring up/down the
-newly added interface.
-
-.. warning:: Due to its use of systemd sockets, hotplug functionality
- is currently incompatible with SELinux. This issue is being tracked
- `on Launchpad`_. Additionally, hotplug support is considered experimental for
- non-Debian based systems.
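-
-For instance, a minimal cloud-config sketch enabling handling of the hotplug
-event for the ``network`` scope (assuming the datasource supports it) might
-look like:
-
-.. code-block:: yaml
-
-   #cloud-config
-   updates:
-     network:
-       when: ['hotplug']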
-
-Examples
-========
-
-apply network config every boot
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-On every boot, apply network configuration found in the datasource.
-
-.. code-block:: yaml
-
- # apply network config on every boot
- updates:
- network:
- when: ['boot']
-
-.. _Cloud-init: https://launchpad.net/cloud-init
-.. _on Launchpad: https://bugs.launchpad.net/cloud-init/+bug/1936229
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
deleted file mode 100644
index 8dae49e9..00000000
--- a/doc/rtd/topics/faq.rst
+++ /dev/null
@@ -1,430 +0,0 @@
-.. _faq:
-
-FAQ
-***
-
-How do I get help?
-==================
-
-Having trouble? We would like to help!
-
-- First go through this page with answers to common questions
-- Use the search bar at the upper left to search these docs
-- Ask a question in the ``#cloud-init`` IRC channel on Libera
-- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
-- Find a bug? Check out the :ref:`reporting_bugs` topic for
- how to report one
-
-Where are the logs?
-===================
-
-Cloud-init writes logs to the following locations:
-
-- `/var/log/cloud-init-output.log`: captures the output from each stage of
- cloud-init when it runs
-- `/var/log/cloud-init.log`: very detailed log with debugging output,
- detailing each action taken
-- `/run/cloud-init`: contains logs about how cloud-init decided to enable or
- disable itself, as well as what platforms/datasources were detected. These
- logs are most useful when trying to determine what cloud-init ran or did not
- run.
-
-Be aware that each time a system boots, new logs are appended to the files in
-`/var/log`. Therefore, the files may have more than one boot's worth of
-information present.
-
-When reviewing these logs, look for any errors or Python tracebacks.
-
-Where are the configuration files?
-==================================
-
-Cloud-init config is provided in two places:
-
-- `/etc/cloud/cloud.cfg`
-- `/etc/cloud/cloud.cfg.d/*.cfg`
-
-These files can define the modules that run during instance initialization,
-the datasources to evaluate on boot, and other settings.
-
-See the :ref:`configuration sources explanation<configuration>` and
-:ref:`configuration reference<base_config_reference>` for more information.
-
-Where are the data files?
-=========================
-
-Inside the `/var/lib/cloud/` directory there are two important subdirectories:
-
-instance
---------
-
-The `/var/lib/cloud/instance` directory is a symbolic link that points
-to the most recently used instance-id directory. This folder contains the
-information cloud-init received from datasources, including vendor and user
-data. This can be helpful to review to ensure the correct data was passed.
-
-It also contains the `datasource` file, which records the full information
-about which datasource was identified and used to set up the system.
-
-Finally, writing the `boot-finished` file is the last thing that cloud-init
-does.
-
-data
-----
-
-The `/var/lib/cloud/data` directory contains information related to the
-previous boot:
-
-* `instance-id`: id of the instance as discovered by cloud-init. Changing
- this file has no effect.
-* `result.json`: JSON file showing both the datasource used to set up the
- instance and whether any errors occurred
-* `status.json`: JSON file showing the datasource used, a breakdown of all
- four modules, whether any errors occurred, and the start and stop times.
-
-What datasource am I using?
-===========================
-
-To correctly set up an instance, cloud-init must correctly identify the
-cloud that it is on. Therefore, knowing which datasource is used on an
-instance launch can aid in debugging.
-
-To find which datasource is being used, run the `cloud-id` command:
-
-.. code-block:: shell-session
-
- $ cloud-id
- nocloud
-
-If the cloud-id is not what is expected, then running the `ds-identify`
-script in debug mode and providing its output in a bug report can help
-resolve any issues:
-
-.. code-block:: shell-session
-
- $ sudo DEBUG_LEVEL=2 DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
-
-The force parameter allows the command to be run again since the instance has
-already launched. The other options increase the verbosity of logging and
-send the logs to STDERR.
-
-How can I re-run datasource detection and cloud-init?
-=====================================================
-
-If a user is developing a new datasource or working on debugging an issue it
-may be useful to re-run datasource detection and the initial setup of
-cloud-init.
-
-To do this, force ds-identify to re-run, clean up any logs, and re-run
-cloud-init:
-
-.. code-block:: shell-session
-
- $ sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
- $ sudo cloud-init clean --logs
- $ sudo cloud-init init --local
- $ sudo cloud-init init
-
-.. warning::
-
- These commands will re-run cloud-init as if this were first boot of a
- system: this will, at the very least, cycle SSH host keys and may do
- substantially more. Do not run these commands on production systems.
-
-How can I debug my user data?
-=============================
-
-Two of the most common issues with cloud-config user data are:
-
-1. Incorrectly formatted YAML
-2. First line does not contain `#cloud-config`
-
-To verify your YAML, there is a short script called `validate-yaml.py`_
-that can validate your user data offline.
-
-.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/main/tools/validate-yaml.py
-
-Another option is to run the following on an instance to debug userdata
-provided to the system:
-
-.. code-block:: shell-session
-
- $ cloud-init schema --system --annotate
-
-As launching instances in the cloud can cost money and take a bit longer,
-it is often easier to launch instances locally using Multipass or LXD; see
-the section on testing cloud-init locally below.
-
-Why did cloud-init never complete?
-==================================
-
-To check if cloud-init is still running, run:
-
-.. code-block:: shell-session
-
- $ cloud-init status
-
-To wait for cloud-init to complete, run:
-
-.. code-block:: shell-session
-
- $ cloud-init status --wait
-
-There are a number of reasons that cloud-init might never complete. This list
-is not exhaustive, but attempts to enumerate potential causes:
-
-External reasons:
------------------
-- failed dependent services in the boot
-- bugs in the kernel or drivers
-- bugs in external userspace tools that are called by cloud-init
-
-Internal reasons:
------------------
-- a command in ``bootcmd`` or ``runcmd`` that never completes (ex: running
- `cloud-init status --wait` will wait forever on itself and never complete)
-- nonstandard configurations that disable timeouts or set extremely high
- values ("never" is used in a loose sense here)
-
-Failing to Complete on Systemd:
--------------------------------
-
-Cloud-init consists of multiple systemd services. If a service that
-cloud-init depends on stalls, cloud-init will not continue.
-When reporting a bug related to cloud-init failing to complete on
-systemd, please make sure to include the output of the following commands.
-
-.. code-block:: shell-session
-
- $ systemd-analyze critical-chain cloud-init.target
- $ journalctl --boot=-1
- $ systemctl --failed
-
-
-How can I make a module run on every boot?
-==========================================
-Modules have a default frequency that can be overridden. This is done
-by modifying the module list in ``/etc/cloud/cloud.cfg``.
-
-1. Change the module from a string (default) to a list.
-2. Set the first list item to the module name and the second item to the
- frequency.
-
-Example
--------
-The following example demonstrates how to log boot times to a file every boot.
-
-Update ``/etc/cloud/cloud.cfg``:
-
-.. code-block:: yaml
- :name: /etc/cloud/cloud.cfg
- :emphasize-lines: 3
-
- cloud_final_modules:
- # list shortened for brevity
- - [phone-home, always]
- - final-message
- - power-state-change
-
-
-
-Your userdata could then be:
-
-.. code-block:: yaml
-
- #cloud-config
- phone_home:
- url: http://example.com/$INSTANCE_ID/
- post: all
-
-
-
-How can I test cloud-init locally before deploying to the cloud?
-================================================================
-
-Several different virtual machine and containerization tools can be used for
-testing locally. Multipass, LXD, and QEMU are described in this section.
-
-
-Multipass
----------
-
-`Multipass`_ is a cross-platform tool to launch Ubuntu VMs across Linux,
-Windows, and macOS.
-
-When a user launches a Multipass VM, user data can be passed by adding the
-`--cloud-init` flag and the appropriate YAML file containing user data:
-
-.. code-block:: shell-session
-
- $ multipass launch bionic --name test-vm --cloud-init userdata.yaml
-
-Multipass will validate the YAML syntax of the cloud-config file before
-attempting to start the VM, which helps save time when experimenting with
-various cloud-configs.
-
-Multipass only supports passing user-data and only as YAML cloud-config
-files. Passing a script, a MIME archive, or any of the other user-data
-formats cloud-init supports will result in an error from the YAML syntax
-validator.
-
-.. _Multipass: https://multipass.run/
-
-LXD
----
-
-`LXD`_ offers a streamlined user experience for using Linux system
-containers. With LXD, a user can pass:
-
-* user data
-* vendor data
-* metadata
-* network configuration
-
-The following initializes a container with user data:
-
-.. code-block:: shell-session
-
- $ lxc init ubuntu-daily:bionic test-container
- $ lxc config set test-container user.user-data - < userdata.yaml
- $ lxc start test-container
-
-To avoid the extra commands, this can also be done at launch:
-
-.. code-block:: shell-session
-
- $ lxc launch ubuntu-daily:bionic test-container --config=user.user-data="$(cat userdata.yaml)"
-
-Finally, a profile can be set up with the specific data if a user needs to
-launch this multiple times:
-
-.. code-block:: shell-session
-
- $ lxc profile create dev-user-data
- $ lxc profile set dev-user-data user.user-data - < cloud-init-config.yaml
- $ lxc launch ubuntu-daily:bionic test-container -p default -p dev-user-data
-
-The above examples all show how to pass user data. To pass other types of
-configuration data, use the config option listed below:
-
-+----------------+---------------------------+
-| Data | Config Option |
-+================+===========================+
-| user data | cloud-init.user-data |
-+----------------+---------------------------+
-| vendor data | cloud-init.vendor-data |
-+----------------+---------------------------+
-| network config | cloud-init.network-config |
-+----------------+---------------------------+
-
-See the LXD `Instance Configuration`_ docs for more info about configuration
-values or the LXD `Custom Network Configuration`_ document for more about
-custom network config.
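-
-For example, custom network configuration could be provided in a similar way
-(a sketch; this assumes an LXD version that accepts the ``cloud-init.*``
-keys shown above):
-
-.. code-block:: shell-session
-
-   $ lxc init ubuntu-daily:bionic test-container
-   $ lxc config set test-container cloud-init.network-config - < network-config.yaml
-   $ lxc start test-container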
-
-.. _LXD: https://linuxcontainers.org/
-.. _Instance Configuration: https://linuxcontainers.org/lxd/docs/master/instances
-.. _Custom Network Configuration: https://linuxcontainers.org/lxd/docs/master/cloud-init
-
-QEMU
-----
-
-The `cloud-localds` command from the `cloud-utils`_ package generates a disk
-with user-supplied data. The NoCloud datasource allows users to provide their
-own user data, metadata, or network configuration directly to an instance
-without running a network service. This is helpful, for example, for
-launching local cloud images with QEMU.
-
-The following is an example of creating the local disk using the cloud-localds
-command:
-
-.. code-block:: shell-session
-
- $ cat >user-data <<EOF
- #cloud-config
- password: password
- chpasswd:
- expire: False
- ssh_pwauth: True
- ssh_authorized_keys:
- - ssh-rsa AAAA...UlIsqdaO+w==
- EOF
- $ cloud-localds seed.img user-data
-
-The resulting seed.img can then be passed along to a cloud image containing
-cloud-init. Below is an example of passing the seed.img with QEMU:
-
-.. code-block:: shell-session
-
- $ qemu-system-x86_64 -m 1024 -net nic -net user \
- -hda ubuntu-20.04-server-cloudimg-amd64.img \
- -hdb seed.img
-
-The booted image will then allow login using the password provided above.
-
-Users can also provide much more detailed configuration, including network
-configuration and metadata:
-
-.. code-block:: shell-session
-
- $ cloud-localds --network-config=network-config-v2.yaml \
- seed.img userdata.yaml metadata.yaml
-
-See the :ref:`network_config_v2` page for details on the format and config of
-network configuration. To learn more about the possible values for metadata,
-check out the :ref:`datasource_nocloud` page.
-
-.. _cloud-utils: https://github.com/canonical/cloud-utils/
-
-Where can I learn more?
-=======================
-
-Below are some videos, blog posts, and white papers about cloud-init from a
-variety of sources.
-
-Videos:
-
-- `cloud-init - The Good Parts`_
-- `Perfect Proxmox Template with Cloud Image and Cloud Init [proxmox, cloud-init, template]`_
-- `cloud-init - Building clouds one Linux box at a time (Video)`_
-- `Metadata and cloud-init`_
-- `Introduction to cloud-init`_
-
-Blog Posts:
-
-- `cloud-init - The cross-cloud Magic Sauce (PDF)`_
-- `cloud-init - Building clouds one Linux box at a time (PDF)`_
-- `The beauty of cloud-init`_
-- `Cloud-init Getting Started [fedora, libvirt, cloud-init]`_
-- `Build Azure Devops Agents With Linux cloud-init for Dotnet Development [terraform, azure, devops, docker, dotnet, cloud-init]`_
-- `Cloud-init Getting Started [fedora, libvirt, cloud-init]`_
-- `Setup Neovim cloud-init Completion [neovim, yaml, Language Server Protocol, jsonschema, cloud-init]`_
-
-Events:
-
-- `cloud-init Summit 2019`_
-- `cloud-init Summit 2018`_
-- `cloud-init Summit 2017`_
-
-
-Whitepapers:
-
-- `Utilising cloud-init on Microsoft Azure (Whitepaper)`_
-- `Cloud Instance Initialization with cloud-init (Whitepaper)`_
-
-.. _cloud-init - The Good Parts: https://www.youtube.com/watch?v=2_m6EUo6VOI
-.. _Utilising cloud-init on Microsoft Azure (Whitepaper): https://ubuntu.com/engage/azure-cloud-init-whitepaper
-.. _Cloud Instance Initialization with cloud-init (Whitepaper): https://ubuntu.com/blog/cloud-instance-initialisation-with-cloud-init
-
-.. _cloud-init - The cross-cloud Magic Sauce (PDF): https://events.linuxfoundation.org/wp-content/uploads/2017/12/cloud-init-The-cross-cloud-Magic-Sauce-Scott-Moser-Chad-Smith-Canonical.pdf
-.. _cloud-init - Building clouds one Linux box at a time (Video): https://www.youtube.com/watch?v=1joQfUZQcPg
-.. _cloud-init - Building clouds one Linux box at a time (PDF): https://web.archive.org/web/20181111020605/https://annex.debconf.org/debconf-share/debconf17/slides/164-cloud-init_Building_clouds_one_Linux_box_at_a_time.pdf
-.. _Metadata and cloud-init: https://www.youtube.com/watch?v=RHVhIWifVqU
-.. _The beauty of cloud-init: https://web.archive.org/web/20180830161317/http://brandon.fuller.name/archives/2011/05/02/06.40.57/
-.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY
-.. _Build Azure Devops Agents With Linux cloud-init for Dotnet Development [terraform, azure, devops, docker, dotnet, cloud-init]: https://codingsoul.org/2022/04/25/build-azure-devops-agents-with-linux-cloud-init-for-dotnet-development/
-.. _Perfect Proxmox Template with Cloud Image and Cloud Init [proxmox, cloud-init, template]: https://www.youtube.com/watch?v=shiIi38cJe4
-.. _Cloud-init Getting Started [fedora, libvirt, cloud-init]: https://blog.while-true-do.io/cloud-init-getting-started/
-.. _Setup Neovim cloud-init Completion [neovim, yaml, Language Server Protocol, jsonschema, cloud-init]: https://phoenix-labs.xyz/blog/setup-neovim-cloud-init-completion/
-
-.. _cloud-init Summit 2019: https://powersj.io/post/cloud-init-summit19/
-.. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/
-.. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
deleted file mode 100644
index 7d75d168..00000000
--- a/doc/rtd/topics/format.rst
+++ /dev/null
@@ -1,214 +0,0 @@
-.. _user_data_formats:
-
-*****************
-User-Data Formats
-*****************
-
-User data that will be acted upon by cloud-init must be in one of the
-following formats.
-
-Cloud Config Data
-=================
-
-Cloud-config is the simplest way to accomplish many common tasks via
-user-data. Using cloud-config syntax, the user can specify configuration in a
-human-friendly format.
-
-Such configuration includes:
-
-- apt upgrade should be run on first boot
-- a different apt mirror should be used
-- additional apt sources should be added
-- certain SSH keys should be imported
-- *and many more...*
-
-.. note::
- This file must be valid YAML syntax.
-
-See the :ref:`yaml_examples` section for a commented set of examples of
-supported cloud config formats.
-
-Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when
-using a MIME archive.
-
-.. note::
- New in cloud-init v. 18.4: Cloud config data can also render cloud instance
- metadata variables using jinja templating. See
- :ref:`instance_metadata` for more information.
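-
-As an illustrative sketch, a minimal cloud-config might look like the
-following (the keys shown are common cloud-config options; the imported SSH
-key id is a placeholder):
-
-.. code-block:: yaml
-
-   #cloud-config
-   package_update: true
-   package_upgrade: true
-   ssh_import_id:
-     - lp:my-launchpad-user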
-
-User-Data Script
-================
-
-Typically used by those who just want to execute a shell script.
-
-Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME
-archive.
-
-.. note::
- New in cloud-init v. 18.4: User-data scripts can also render cloud instance
- metadata variables using jinja templating. See
- :ref:`instance_metadata` for more information.
-
-Example
--------
-
-.. code-block:: shell-session
-
- $ cat myscript.sh
-
- #!/bin/sh
- echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt
-
- $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9
-
-Kernel Command Line
-===================
-
-When using the :ref:`datasource_nocloud` datasource, users can pass user data
-via the kernel command line parameters. See the :ref:`datasource_nocloud`
-datasource and :ref:`kernel_cmdline` documentations for more details.
-
-Gzip Compressed Content
-=======================
-
-Content found to be gzip compressed will be uncompressed.
-The uncompressed data will then be used as if it were not compressed.
-This is typically useful because user-data is limited to ~16384 [#]_ bytes.
-
-Mime Multi Part Archive
-=======================
-
-Using a MIME multi-part file, the user can specify more than one type of
-data. This list of rules is applied to each part of the multi-part file.
-
-For example, both a user data script and a cloud-config type could be
-specified.
-
-Supported content-types can be listed with the cloud-init make-mime subcommand:
-
-.. code-block:: shell-session
-
- $ cloud-init devel make-mime --list-types
- cloud-boothook
- cloud-config
- cloud-config-archive
- cloud-config-jsonp
- jinja2
- part-handler
- x-include-once-url
- x-include-url
- x-shellscript
- x-shellscript-per-boot
- x-shellscript-per-instance
- x-shellscript-per-once
-
-Helper subcommand to generate mime messages
--------------------------------------------
-
-The cloud-init subcommand can generate MIME multi-part files: `make-mime`_.
-
-The ``make-mime`` subcommand takes pairs of (filename, "text/" MIME subtype)
-separated by a colon (e.g. ``config.yaml:cloud-config``) and emits a MIME
-multipart message to stdout.
-
-Examples
---------
-Create userdata containing both a cloud-config (``config.yaml``)
-and a shell script (``script.sh``):
-
-.. code-block:: shell-session
-
- $ cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > userdata
-
-Create userdata containing 3 shell scripts:
-
-- ``always.sh`` - Run every boot
-- ``instance.sh`` - Run once per instance
-- ``once.sh`` - Run once
-
-.. code-block:: shell-session
-
- $ cloud-init devel make-mime -a always.sh:x-shellscript-per-boot -a instance.sh:x-shellscript-per-instance -a once.sh:x-shellscript-per-once
-
-.. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
-
-Include File
-============
-
-This content is an ``include`` file.
-
-The file contains a list of URLs, one per line. Each of the URLs will be
-read, and its content will be passed through this same set of rules. That is,
-the content read from each URL can be gzipped, mime-multi-part, or plain text.
-If an error occurs while reading a file, the remaining files will not be read.
-
-Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using
-a MIME archive.
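-
-A hypothetical include file (the URLs are placeholders) might look like:
-
-.. code-block:: text
-
-   #include
-   https://example.com/common-cloud-config.yaml
-   https://example.com/setup-script.sh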
-
-Cloud Boothook
-==============
-
-This content is ``boothook`` data. It is stored in a file under
-``/var/lib/cloud`` and then executed immediately. This is the earliest ``hook``
-available. Note that there is no mechanism provided for running only once.
-The boothook must take care of this itself.
-
-The instance id is provided in the environment variable ``INSTANCE_ID``.
-This can be used to provide 'once-per-instance' functionality, as sketched
-below.
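-
-A minimal sketch of such a boothook (the marker path is an illustrative
-choice, not a cloud-init convention):
-
-.. code-block:: text
-
-   #cloud-boothook
-   #!/bin/sh
-   # Skip if this hook already ran for the current instance
-   marker="/var/lib/cloud/instance/boothook-ran"
-   [ -e "$marker" ] && exit 0
-   echo "boothook ran for instance ${INSTANCE_ID}" >> /root/boothook.log
-   touch "$marker"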
-
-Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when
-using a MIME archive.
-
-Part Handler
-============
-
-This is a ``part-handler``: It contains custom code for either supporting new
-mime-types in multi-part user data, or overriding the existing handlers for
-supported mime-types. It will be written to a file in ``/var/lib/cloud/data``
-based on its filename (which is generated).
-
-This must be python code that contains a ``list_types`` function and a
-``handle_part`` function. Once the section is read the ``list_types`` method
-will be called. It must return a list of mime-types that this part-handler
-handles. Because mime parts are processed in order, a ``part-handler`` part
-must precede any parts with mime-types it is expected to handle in the same
-user data.
-
-The ``handle_part`` function must be defined like:
-
-.. code-block:: python
-
- def handle_part(data, ctype, filename, payload):
- # data = the cloudinit object
- # ctype = "__begin__", "__end__", or the mime-type of the part that is being handled.
- # filename = the filename of the part (or a generated filename if none is present in mime data)
- # payload = the part's content
-
-Cloud-init will then call the ``handle_part`` function once before it handles
-any parts, once per part received, and once after all parts have been handled.
-The ``'__begin__'`` and ``'__end__'`` sentinels allow the part handler to do
-initialization or teardown before or after receiving any parts.
-
-Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when
-using a MIME archive.
-
-Example
--------
-
-.. literalinclude:: ../../examples/part-handler.txt
- :language: python
- :linenos:
-
-This `blog`_ post also offers another example of more advanced usage.
-
-Disabling User-Data
-===================
-
-Cloud-init can be configured to ignore any user-data provided to the
-instance. This allows custom images to prevent users from accidentally
-breaking closed appliances. Setting ``allow_userdata: false`` in the
-configuration will prevent cloud-init from processing user-data, as sketched
-below.
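-
-A minimal sketch of such a configuration drop-in (the filename is arbitrary):
-
-.. code-block:: yaml
-
-   # /etc/cloud/cloud.cfg.d/99-disable-userdata.cfg
-   allow_userdata: false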
-
-.. [#] See your cloud provider for applicable user-data size limitations...
-.. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst
deleted file mode 100644
index 2bee6a5c..00000000
--- a/doc/rtd/topics/integration_tests.rst
+++ /dev/null
@@ -1,208 +0,0 @@
-.. _integration_tests:
-
-*******************
-Integration Testing
-*******************
-
-Overview
-=========
-
-Integration tests are written using pytest and are located at
-``tests/integration_tests``. General design principles
-laid out in :ref:`testing` should be followed for integration tests.
-
-Setup is accomplished via a set of fixtures located in
-``tests/integration_tests/conftest.py``.
-
-Test Definition
-===============
-Tests are defined like any other pytest test. The ``user_data``
-mark can be used to supply the cloud-config user data. Platform specific
-marks can be used to limit tests to particular platforms. The
-client fixture can be used to interact with the launched
-test instance.
-
-See `Examples`_ section for examples.
-
-Test Execution
-==============
-Test execution happens via pytest. A tox definition exists to run integration
-tests. To run all integration tests, you would run:
-
-.. code-block:: bash
-
- $ tox -e integration-tests
-
-Pytest arguments may also be passed. For example:
-
-.. code-block:: bash
-
- $ tox -e integration-tests tests/integration_tests/modules/test_combined.py
-
-Configuration
-=============
-
-All possible configuration values are defined in
-`tests/integration_tests/integration_settings.py <https://github.com/canonical/cloud-init/blob/main/tests/integration_tests/integration_settings.py>`_.
-Defaults can be
-overridden by supplying values in ``tests/integration_tests/user_settings.py``
-or by providing an environment variable of the same name prepended with
-``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting:
-
-.. code-block:: bash
-
- CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
-
-
-Cloud Interaction
-=================
-Cloud interaction happens via the
-`pycloudlib <https://pycloudlib.readthedocs.io/en/latest/index.html>`_ library.
-In order to run integration tests, pycloudlib must first be
-`configured <https://pycloudlib.readthedocs.io/en/latest/configuration.html#configuration>`_.
-
-For a minimal setup using LXD, write the following to
-``~/.config/pycloudlib.toml``:
-
-.. code-block:: toml
-
- [lxd]
-
-
-Image Selection
-===============
-
-Each integration testing run uses a single image as its basis. This
-image is configured using the ``OS_IMAGE`` variable; see
-`Configuration`_ for details of how configuration works.
-
-``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g.
-"focal"), or an image specification. If an Ubuntu series name is
-given, then the most recent image for that series on the target cloud
-will be used. For other use cases, an image specification is used.
-
-In its simplest form, an image specification can simply be a cloud's
-image ID (e.g. "ami-deadbeef", "ubuntu:focal"). In this case, the
-image so-identified will be used as the basis for this testing run.
-
-This has a drawback, however: as we do not know what OS or release is
-within the image, the integration testing framework will run *all*
-tests against the image in question. If it's a RHEL8 image, then we
-would expect Ubuntu-specific tests to fail (and vice versa).
-
-To address this, a full image specification can be given. This is of
-the form: ``<image_id>[::<os>[::<release>]]`` where ``image_id`` is a
-cloud's image ID, ``os`` is the OS name, and ``release`` is the OS
-release name. So, for example, Ubuntu 18.04 (Bionic Beaver) on LXD is
-``ubuntu:bionic::ubuntu::bionic`` or RHEL 8 on Amazon is
-``ami-justanexample::rhel::8``. When a full specification is given,
-only tests which are intended for use on that OS and release will be
-executed.
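-
-For example, a full image specification could be supplied through the
-environment (reusing the placeholder image ID from above):
-
-.. code-block:: bash
-
-   CLOUD_INIT_OS_IMAGE='ami-justanexample::rhel::8' pytest tests/integration_tests/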
-
-Image Setup
-===========
-
-Image setup occurs once when a test session begins and is implemented
-via a fixture. Image setup roughly follows these steps:
-
-* Launch an instance on the specified test platform
-* Install the version of cloud-init under test
-* Run ``cloud-init clean`` on the instance so subsequent boots
- resemble out of the box behavior
-* Take a snapshot of the instance to be used as a new image from
- which new instances can be launched
-
-Test Setup
-==============
-Test setup occurs between image setup and test execution. Test setup
-is implemented via one of the ``client`` fixtures. When a client fixture
-is used, a test instance from which to run tests is launched prior to
-test execution and torn down after.
-
-Continuous Integration
-======================
-A subset of the integration tests are run when a pull request
-is submitted on GitHub. The tests run on these continuous
-integration (CI) runs are given a pytest mark:
-
-.. code-block:: python
-
- @pytest.mark.ci
-
-Most new tests should *not* use this mark, so be aware that having a
-successful CI run does not necessarily mean that your test passed
-successfully.
-
-Fixtures
-========
-Integration tests rely heavily on fixtures to do initial test setup.
-One or more of these fixtures will be used in almost every integration test.
-
-Details such as the cloud platform or initial image to use are determined
-via what is specified in the `Configuration`_.
-
-client
-------
-The ``client`` fixture should be used for most test cases. It ensures:
-
-- All setup performed by `session_cloud`_ and `setup_image`_
-- `Pytest marks <https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/conftest.py#L220-L224>`_
- used during instance creation are obtained and applied
-- The test instance is launched
-- Test failure status is determined after test execution
-- Logs are collected (if configured) after test execution
-- The test instance is torn down after test execution
-
-``module_client`` and ``class_client`` fixtures also exist for the
-purpose of running multiple tests against a single launched instance.
-They provide the exact same functionality as ``client``, but are
-scoped to the module or class respectively.
-
-session_cloud
--------------
-The ``session_cloud`` session-scoped fixture will provide an
-`IntegrationCloud <https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/clouds.py#L102>`_
-instance for the currently configured cloud. The fixture also
-ensures that any custom cloud session cleanup is performed.
-
-setup_image
------------
-The ``setup_image`` session-scope fixture will
-create a new image to launch all further cloud instances
-during this test run. It ensures:
-
-- A cloud instance is launched on the configured platform
-- The version of cloud-init under test is installed on the instance
-- ``cloud-init clean --logs`` is run on the instance
-- A snapshot of the instance is taken to be used as the basis for
- future instance launches
-- The originally launched instance is torn down
-- The custom created image is torn down after all tests finish
-
-Examples
---------
-A simple test case using the ``client`` fixture:
-
-.. code-block:: python
-
- USER_DATA = """\
- #cloud-config
- bootcmd:
- - echo 'hello!' > /var/tmp/hello.txt
- """
-
-
- @pytest.mark.user_data(USER_DATA)
- def test_bootcmd(client):
- log = client.read_from_file("/var/log/cloud-init.log")
- assert "Shellified 1 commands." in log
- assert client.execute('cat /var/tmp/hello.txt').strip() == "hello!"
-
-Customizing the launch arguments before launching an instance manually:
-
-.. code-block:: python
-
- def test_launch(session_cloud: IntegrationCloud, setup_image):
- with session_cloud.launch(launch_kwargs={"wait": False}) as client:
- client.instance.wait()
- assert client.execute("echo hello world").strip() == "hello world"
diff --git a/doc/rtd/topics/kernel-cmdline.rst b/doc/rtd/topics/kernel-cmdline.rst
deleted file mode 100644
index 4aa02855..00000000
--- a/doc/rtd/topics/kernel-cmdline.rst
+++ /dev/null
@@ -1,71 +0,0 @@
-.. _kernel_cmdline:
-
-*******************
-Kernel Command Line
-*******************
-
-In order to allow an ephemeral, or otherwise pristine, image to receive some
-configuration, cloud-init will read a URL specified on the kernel command
-line and proceed as if its data had previously existed.
-
-This allows for configuring a metadata service, or providing some other data.
-
-.. note::
-
- Usage of the kernel command line is somewhat of a last resort,
- as it requires knowing in advance the correct command line or modifying
- the boot loader to append data.
-
-For example, when ``cloud-init init --local`` runs, it will check to
-see if ``cloud-config-url`` appears in key/value fashion
-in the kernel command line as in:
-
-.. code-block:: text
-
- root=/dev/sda ro cloud-config-url=http://foo.bar.zee/abcde
-
-Cloud-init will then read the contents of the given URL.
-If the content starts with ``#cloud-config``, it will store
-that data on the local filesystem under the static filename
-``/etc/cloud/cloud.cfg.d/91_kernel_cmdline_url.cfg``, and consider it as
-part of the config from that point forward.
-
-If that file exists already, it will not be overwritten, and the
-`cloud-config-url` parameter is completely ignored.
-
-Then, when the DataSource runs, it will find that config already available.
-
-So, in order to be able to configure the MAAS DataSource by controlling the
-kernel command line from outside the image, you can append:
-
- * ``cloud-config-url=http://your.url.here/abcdefg``
-
-Then, have the following content at that url:
-
-.. code-block:: yaml
-
- #cloud-config
- datasource:
- MAAS:
- metadata_url: http://mass-host.localdomain/source
- consumer_key: Xh234sdkljf
- token_key: kjfhgb3n
- token_secret: 24uysdfx1w4
-
-.. warning::
-
- The ``url`` kernel command line key is deprecated.
- Please use the ``cloud-config-url`` parameter instead.
-
-.. note::
-
- Because ``cloud-config-url=`` is so generic, cloud-init requires the
- content to start with ``#cloud-config`` in order to avoid false positives.
-
-.. note::
-
- The ``cloud-config-url=`` content is retrieved via an unauthenticated HTTP
- GET, and it contains credentials. The URL could be set up to be randomly
- generated, and the server could also check the source address, in order to
- be more secure.
diff --git a/doc/rtd/topics/logging.rst b/doc/rtd/topics/logging.rst
deleted file mode 100644
index 4d0a14ca..00000000
--- a/doc/rtd/topics/logging.rst
+++ /dev/null
@@ -1,267 +0,0 @@
-.. _logging:
-
-*******
-Logging
-*******
-Cloud-init supports both local and remote logging configurable through
-multiple configurations:
-
-- Python's built-in logging configuration
-- Cloud-init's event reporting system
-- The cloud-init rsyslog module
-
-Python Logging
-==============
-Cloud-init uses the python logging module, and can accept config for this
-module using the standard python fileConfig format. Cloud-init looks for
-config for the logging module under the ``logcfg`` key.
-
-.. note::
- The logging configuration is not YAML; it is Python ``fileConfig`` format,
- and is passed through directly to the Python logging module. Please use the
- correct syntax for a multi-line string in YAML.
-
-By default, cloud-init uses the logging configuration provided in
-``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default python logging
-configuration writes all cloud-init events with a priority of ``WARNING`` or
-higher to console, and writes all events with a level of ``DEBUG`` or higher
-to ``/var/log/cloud-init.log`` and via syslog.
-
-Python's fileConfig format consists of sections with headings in the format
-``[title]`` and key value pairs in each section. Configuration for python
-logging must contain the sections ``[loggers]``, ``[handlers]``, and
-``[formatters]``, which name the entities of their respective types that will
-be defined. The section name for each defined logger, handler and formatter
-will start with its type, followed by an underscore (``_``) and the name of
-the entity. For example, if a logger was specified with the name ``log01``,
-config for the logger would be in the section ``[logger_log01]``.
-
-Logger config entries contain basic logging set up. They may specify a list of
-handlers to send logging events to as well as the lowest priority level of
-events to handle. A logger named ``root`` must be specified and its
-configuration (under ``[logger_root]``) must contain a level and a list of
-handlers. A level entry can be any of the following: ``DEBUG``, ``INFO``,
-``WARNING``, ``ERROR``, ``CRITICAL``, or ``NOTSET``. For the ``root`` logger
-the ``NOTSET`` option will allow all logging events to be recorded.
-
-Each configured handler must specify a class under the python's ``logging``
-package namespace. A handler may specify a message formatter to use, a
-priority level, and arguments for the handler class. Common handlers are
-``StreamHandler``, which handles stream redirects (i.e. logging to stderr),
-and ``FileHandler`` which outputs to a log file. The logging module also
-supports logging over net sockets, over http, via smtp, and additional complex
-configurations. For full details about the handlers available for python
-logging, please see the documentation for `python logging handlers`_.
-
-Log messages are formatted using the ``logging.Formatter`` class, which is
-configured using ``formatter`` config entities. A default format of
-``%(message)s`` is given if no formatter configs are specified. Formatter
-config entities accept a format string which supports variable replacements.
-These may also accept a ``datefmt`` string which may be used to configure the
-timestamp used in the log messages. The format variables ``%(asctime)s``,
-``%(levelname)s`` and ``%(message)s`` are commonly used and represent the
-timestamp, the priority level of the event and the event message. For
-additional information on logging formatters see `python logging formatters`_.
-
-.. note::
- By default, the format strings used in the logging formatter are in
- Python's old style ``%s`` form. The ``str.format()`` and ``string.Template``
- styles can also be used, by setting the ``style`` parameter in the formatter
- config to ``{`` or ``$`` in place of ``%``.
-
-A simple but functional Python logging configuration for cloud-init is below.
-It will log all messages of priority ``DEBUG`` or higher to both stderr and
-``/tmp/my.log``, using a ``StreamHandler`` and a ``FileHandler`` with
-the default format string ``%(message)s``::
-
- logcfg: |
- [loggers]
- keys=root,cloudinit
- [handlers]
- keys=ch,cf
- [formatters]
- keys=
- [logger_root]
- level=DEBUG
- handlers=
- [logger_cloudinit]
- level=DEBUG
- qualname=cloudinit
- handlers=ch,cf
- [handler_ch]
- class=StreamHandler
- level=DEBUG
- args=(sys.stderr,)
- [handler_cf]
- class=FileHandler
- level=DEBUG
- args=('/tmp/my.log',)
-
-For additional information about configuring python's logging module, please
-see the documentation for `python logging config`_.
-
-.. _logging_command_output:
-
-Command Output
-==============
-Cloud-init can redirect its stdout and stderr based on config given under the
-``output`` config key. The output of any commands run by cloud-init and any
-user or vendor scripts provided will also be included here. The ``output`` key
-accepts a dictionary for configuration. Output files may be specified
-individually for each stage (``init``, ``config``, and ``final``), or a single
-key ``all`` may be used to specify output for all stages.
-
-The output for each stage may be specified as a dictionary of ``output`` and
-``error`` keys, for stdout and stderr respectively, as a tuple with stdout
-first and stderr second, or as a single string to use for both. The strings
-passed to all of these keys are handled by the system shell, so any form of
-redirection that can be used in bash is valid, including piping cloud-init's
-output to ``tee``, or ``logger``. If only a filename is provided, cloud-init
-will append its output to the file as though ``>>`` was specified.
-
-By default, cloud-init loads its output configuration from
-``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default config directs both
-stdout and stderr from all cloud-init stages to
-``/var/log/cloud-init-output.log``. The default config is given as ::
-
- output: { all: "| tee -a /var/log/cloud-init-output.log" }
-
-For a more complex example, the following configuration would output the init
-stage to ``/var/log/cloud-init.out`` and ``/var/log/cloud-init.err``, for
-stdout and stderr respectively, replacing anything that was previously there.
-For the config stage, it would pipe both stdout and stderr through ``tee -a
-/var/log/cloud-config.log``. For the final stage it would append the output of
-stdout and stderr to ``/var/log/cloud-final.out`` and
-``/var/log/cloud-final.err`` respectively. ::
-
- output:
- init:
- output: "> /var/log/cloud-init.out"
- error: "> /var/log/cloud-init.err"
- config: "tee -a /var/log/cloud-config.log"
- final:
- - ">> /var/log/cloud-final.out"
- - "/var/log/cloud-final.err"
-
-Event Reporting
-===============
-Cloud-init contains an eventing system that allows events to be emitted
-to a variety of destinations.
-
-Three configurations are available for reporting events:
-
-- **webhook**: POST to a web server
-- **log**: Write to the cloud-init log at configurable log level
-- **stdout**: Print to stdout
-
-The default configuration is to emit events to the cloud-init log file
-at ``DEBUG`` level.
-
-Event reporting can be configured using the ``reporting`` key in
-cloud-config userdata.
-
-Configuration
--------------
-
-**webhook**
-
-.. code-block:: yaml
-
- reporting:
- <user-defined name>:
- type: webhook
- endpoint: <url>
- timeout: <timeout in seconds>
- retries: <number of retries>
- consumer_key: <OAuth consumer key>
- token_key: <OAuth token key>
- token_secret: <OAuth token secret>
- consumer_secret: <OAuth consumer secret>
-
-``endpoint`` is the only additional required key when specifying
-``type: webhook``.
-
-**log**
-
-.. code-block:: yaml
-
- reporting:
- <user-defined name>:
- type: log
- level: <DEBUG|INFO|WARN|ERROR|FATAL>
-
-``level`` is optional and defaults to "DEBUG".
-
-**print**
-
-.. code-block:: yaml
-
- reporting:
- <user-defined name>:
- type: print
-
-
-Example
-^^^^^^^
-
-The following example shows configuration for all three reporting types:
-
-.. code-block:: yaml
-
- #cloud-config
- reporting:
- webserver:
- type: webhook
- endpoint: "http://10.0.0.1:55555/asdf"
- timeout: 5
- retries: 3
- consumer_key: <consumer_key>
- token_key: <token_key>
- token_secret: <token_secret>
- consumer_secret: <consumer_secret>
- info_log:
- type: log
- level: WARN
- stdout:
- type: print
-
-Rsyslog Module
-==============
-Cloud-init's ``cc_rsyslog`` module allows for fully customizable rsyslog
-configuration under the ``rsyslog`` config key. The simplest way to
-use the rsyslog module is by specifying remote servers under the ``remotes``
-key in ``rsyslog`` config. The ``remotes`` key takes a dictionary where each
-key represents the name of an rsyslog server and each value is the
-configuration for that server. The format for server config is:
-
- - optional filter for log messages (defaults to ``*.*``)
- - optional leading ``@`` or ``@@``, indicating udp and tcp respectively
- (defaults to ``@``, for udp)
- - ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]``
- format, (e.g. ``@[fd00::1]:514``)
- - optional port number (defaults to ``514``)
-
-For example, to send logging to an rsyslog server named ``log_serv`` with
-address ``10.0.4.1``, using port number ``514``, over UDP, with all log
-messages enabled, one could use either of the following.
-
-With all options specified::
-
- rsyslog:
- remotes:
- log_serv: "*.* @10.0.4.1:514"
-
-With defaults used::
-
- rsyslog:
- remotes:
- log_serv: "10.0.4.1"
-
-
-For more information on rsyslog configuration, see
-:ref:`topics/modules:rsyslog`.
-
-.. _python logging config: https://docs.python.org/3/library/logging.config.html#configuration-file-format
-.. _python logging handlers: https://docs.python.org/3/library/logging.handlers.html
-.. _python logging formatters: https://docs.python.org/3/library/logging.html#formatter-objects
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
deleted file mode 100644
index a1422fe3..00000000
--- a/doc/rtd/topics/merging.rst
+++ /dev/null
@@ -1,288 +0,0 @@
-**************************
-Merging User-Data Sections
-**************************
-
-Overview
-========
-
-This was implemented because it has been a common feature request to have a
-way to specify how cloud-config YAML "dictionaries" provided as user-data are
-merged together when there are multiple YAML files to combine (say, when
-performing an #include).
-
-The previous merging algorithm was very simple and would only overwrite, not
-append, lists, strings, and so on. It was therefore decided to create a new
-and improved way to merge dictionaries (and their contained objects) together
-that is customizable, allowing users who provide cloud-config user-data to
-determine exactly how their objects will be merged.
-
-For example:
-
-.. code-block:: yaml
-
- #cloud-config (1)
- runcmd:
- - bash1
- - bash2
-
- #cloud-config (2)
- runcmd:
- - bash3
- - bash4
-
-The previous way of merging the two objects above would result in a final
-cloud-config object that contains the following:
-
-.. code-block:: yaml
-
- #cloud-config (merged)
- runcmd:
- - bash3
- - bash4
-
-Typically this is not what users want; instead they would likely prefer:
-
-.. code-block:: yaml
-
- #cloud-config (merged)
- runcmd:
- - bash1
- - bash2
- - bash3
- - bash4
-
-This way makes it easier to combine the various cloud-config objects you have
-into a more useful list, reducing the duplication that would be necessary to
-accomplish the same result with the previous method.
-
-
-Built-in Mergers
-================
-
-Cloud-init provides merging for the following built-in types:
-
-- Dict
-- List
-- String
-
-The ``Dict`` merger has the following options which control what is done with
-values contained within the config.
-
-- ``allow_delete``: Existing values not present in the new value can be
- deleted, defaults to False
-- ``no_replace``: Do not replace an existing value if one is already present,
- enabled by default.
-- ``replace``: Overwrite existing values with new ones.
-
-The ``List`` merger has the following options which control what is done with
-the values contained within the config.
-
-- ``append``: Add new value to the end of the list, defaults to False.
-- ``prepend``: Add new values to the start of the list, defaults to False.
-- ``no_replace``: Do not replace an existing value if one is already present,
- enabled by default.
-- ``replace``: Overwrite existing values with new ones.
-
-The ``Str`` merger has the following options which control what is done with
-the values contained within the config.
-
-- ``append``: Add new value to the end of the string, defaults to False.
-
-Common options for all merge types which control how recursive merging is
-done on other types.
-
-- ``recurse_dict``: If True merge the new values of the dictionary, defaults to
- True.
-- ``recurse_list``: If True merge the new values of the list, defaults to
- False.
-- ``recurse_array``: Alias for ``recurse_list``.
-- ``recurse_str``: If True merge the new values of the string, defaults to
- False.
-
-
-Customizability
-===============
-
-Because the above merging algorithm may not always be desired (just as the
-previous merging algorithm was not always the preferred one), the concept of
-customized merging was introduced through 'merge classes'.
-
-A merge class is a class definition which provides functions that can be used
-to merge a given type with another given type.
-
-An example of one of these merging classes is the following:
-
-.. code-block:: python
-
- class Merger:
- def __init__(self, merger, opts):
- self._merger = merger
- self._overwrite = 'overwrite' in opts
-
- # This merging algorithm will attempt to merge with
- # another dictionary, on encountering any other type of object
- # it will not merge with said object, but will instead return
- # the original value
- #
- # On encountering a dictionary, it will create a new dictionary
- # composed of the original and the one to merge with, if 'overwrite'
- # is enabled then keys that exist in the original will be overwritten
- # by keys in the one to merge with (and associated values). Otherwise
- # if not in overwrite mode the 2 conflicting keys themselves will
- # be merged.
- def _on_dict(self, value, merge_with):
- if not isinstance(merge_with, (dict)):
- return value
- merged = dict(value)
- for (k, v) in merge_with.items():
- if k in merged:
- if not self._overwrite:
- merged[k] = self._merger.merge(merged[k], v)
- else:
- merged[k] = v
- else:
- merged[k] = v
- return merged
-
-As you can see, there is a '_on_dict' method here that will be given a source
-value and a value to merge with. The result will be the merged object. This
-code itself is called by another merging class which 'directs' the merging to
-happen by analyzing the types of the objects to merge and attempting to find a
-known object that will merge that type. I will avoid pasting that here, but it
-can be found in the `mergers/__init__.py` file (see `LookupMerger` and
-`UnknownMerger`).
-
-So, following the typical cloud-init way of allowing source code to be
-downloaded and used dynamically, it is possible for users to inject their own
-merging files to handle specific types of merging as they choose (the basic
-ones included will handle lists, dicts, and strings). Note how each merger can
-have options associated with it which affect how the merging is performed; for
-example, a dictionary merger can be told to overwrite instead of attempting to
-merge, or a string merger can be told to append strings instead of discarding
-other strings to merge with.
-
-How to activate
-===============
-
-There are a few ways to activate the merging algorithms, and to customize them
-for your own usage.
-
-1. The first way involves the usage of MIME messages in cloud-init to specify
- multipart documents (this is one way in which multiple cloud-config is
- joined together into a single cloud-config). Two new headers are looked
- for, both of which can define the way merging is done (the first header to
- exist wins). These new headers (in lookup order) are 'Merge-Type' and
- 'X-Merge-Type'. The value should be a string which will satisfy the new
- merging format definition (see below for this format).
-
-2. The second way is specifying the merge-type in the body of the
- cloud-config dictionary. There are two ways to specify this: either as a
- string or as a dictionary (see the formats below). The keys that are looked
- up for this definition are, in order, 'merge_how' and 'merge_type'. A
- string-based example is sketched after this list.
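-
-For instance, a minimal cloud-config sketch using the string form (the
-merger string shown is illustrative; the format is described below):
-
-.. code-block:: yaml
-
-   #cloud-config
-   merge_type: 'list(append)+dict(no_replace,recurse_list)+str()'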
-
-String format
--------------
-
-The string format that is expected is the following.
-
-::
-
- classname1(option1,option2)+classname2(option3,option4)....
-
-The class names will be matched against the classes that can be used to
-merge, and the options provided will be given to each class on
-construction of that class.
-
-For example, the default string that is used when none is provided is the
-following:
-
-::
-
- list()+dict()+str()
-
-Dictionary format
------------------
-
-A dictionary can be used when it specifies the same information as the
-string format (i.e. the second option above), for example:
-
-.. code-block:: python
-
- {'merge_how': [{'name': 'list', 'settings': ['append']},
- {'name': 'dict', 'settings': ['no_replace', 'recurse_list']},
- {'name': 'str', 'settings': ['append']}]}
-
-This is the equivalent of the default string format, but in dictionary
-form instead of string form.
-
-Specifying multiple types and its effect
-========================================
-
-Now you may be asking yourself: if I specify a merge-type header or
-dictionary for every cloud-config that I provide, what exactly happens?
-
-The answer is that when merging, a stack of 'merging classes' is kept. The
-first entry on that stack is the set of default merging classes; this set of
-mergers will be used when the first cloud-config is merged with the initial
-empty cloud-config dictionary. If the cloud-config that was just merged
-provided a set of merging classes (via the above formats), then those merging
-classes will be pushed onto the stack. Now, if there is a second cloud-config
-to be merged, then the merging classes from the cloud-config before it will be
-used (not the default), and so on. This way a cloud-config can decide how it
-will merge with a cloud-config dictionary coming after it.
-
-Other uses
-==========
-
-In addition to being used for merging user-data sections, the default merging
-algorithm for merging 'conf.d' YAML files (which form an initial YAML config
-for cloud-init) was also changed to use this mechanism, so its full
-benefits (and customization) are available there as well. Other places that
-used the previous merging are also, similarly, now extensible (metadata
-merging, for example).
-
-Note, however, that merge algorithms are not used *across* types of
-configuration. As was the case before merging was implemented,
-user-data will overwrite conf.d configuration without merging.
-
-Example cloud-config
-====================
-
-A common request is to include multiple ``runcmd`` directives in different
-files and merge all of the commands together. To achieve this, we must modify
-the default merging to allow for dictionaries to join list values.
-
-
-The first config
-
-.. code-block:: yaml
-
- #cloud-config
- merge_how:
- - name: list
- settings: [append]
- - name: dict
- settings: [no_replace, recurse_list]
-
- runcmd:
- - bash1
- - bash2
-
-The second config
-
-.. code-block:: yaml
-
- #cloud-config
- merge_how:
- - name: list
- settings: [append]
- - name: dict
- settings: [no_replace, recurse_list]
-
- runcmd:
- - bash3
- - bash4
-
-
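-With these settings, the two ``runcmd`` lists are appended rather than
-replaced, so the ``runcmd`` key in the merged configuration would contain all
-four commands (a sketch of the expected result):
-
-.. code-block:: yaml
-
- runcmd:
- - bash1
- - bash2
- - bash3
- - bash4
-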
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
deleted file mode 100644
index a9dd31af..00000000
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ /dev/null
@@ -1,625 +0,0 @@
-.. _network_config_v1:
-
-Networking Config Version 1
-===========================
-
-This network configuration format lets users customize their instance's
-networking interfaces by assigning subnet configuration, virtual device
-creation (bonds, bridges, VLANs), routes and DNS configuration.
-
-Required elements of a Network Config Version 1 are ``config`` and
-``version``.
-
-Cloud-init will read this format from :ref:`base_config_reference`.
-
-For example, the following could be present in
-``/etc/cloud/cloud.cfg.d/custom-networking.cfg``:
-
-.. code-block:: yaml
-
- network:
- version: 1
- config:
- - type: physical
- name: eth0
- subnets:
- - type: dhcp
-
-The :ref:`datasource_nocloud` datasource can also provide cloud-init
-networking configuration in this format.
-
-Configuration Types
--------------------
-Within the network ``config`` portion, users include a list of configuration
-types. The currently supported ``type`` values are as follows:
-
-- Physical (``physical``)
-- Bond (``bond``)
-- Bridge (``bridge``)
-- VLAN (``vlan``)
-- Nameserver (``nameserver``)
-- Route (``route``)
-
-Physical, Bond, Bridge and VLAN types may also include IP configuration under
-the key ``subnets``.
-
-- Subnet/IP (``subnets``)
-
-
-Physical
-~~~~~~~~
-The ``physical`` type configuration represents a "physical" network device,
-typically Ethernet-based. At least one of these entries is required for
-external network connectivity. Type ``physical`` requires only one key:
-``name``. A ``physical`` device may contain some or all of the following
-keys:
-
-**name**: *<desired device name>*
-
-A device's name must be less than 15 characters. Names exceeding the maximum
-will be truncated. This is a limitation of the Linux kernel network-device
-structure.
-
-**mac_address**: *<MAC Address>*
-
-The MAC Address is a unique device identifier that most Ethernet-based network
-devices possess. Specifying a MAC Address is optional.
-Letters must be lowercase.
-
-.. note::
-
- MAC addresses must be strings. As MAC addresses which consist only of the
- digits 0-9 (i.e. no hex a-f) can be interpreted as a base 60 integer per
- the `YAML 1.1 spec`_, it is best practice to quote all MAC addresses to
- ensure they are parsed as strings regardless of value.
-
-.. _YAML 1.1 spec: https://yaml.org/type/int.html
-
-.. note::
-
- Cloud-init will handle the persistent mapping between a
- device's ``name`` and the ``mac_address``.
-
-**mtu**: *<MTU SizeBytes>*
-
-The MTU key represents a device's Maximum Transmission Unit, the largest size
-packet or frame, specified in octets (eight-bit bytes), that can be sent in a
-packet- or frame-based network. Specifying ``mtu`` is optional.
-
-.. note::
-
- The range of MTU values supported by a device is not known at configuration
- time. A value that is too large or too small for a device may be ignored by
- that device.
-
-
-**Physical Example**::
-
- network:
- version: 1
- config:
- # Simple network adapter
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- # Second nic with Jumbo frames
- - type: physical
- name: jumbo0
- mac_address: aa:11:22:33:44:55
- mtu: 9000
- # 10G pair
- - type: physical
- name: gbe0
- mac_address: cd:11:22:33:44:00
- - type: physical
- name: gbe1
- mac_address: cd:11:22:33:44:02
-
-Bond
-~~~~
-A ``bond`` type will configure a Linux software Bond with one or more network
-devices. A ``bond`` type requires the following keys:
-
-**name**: *<desired device name>*
-
-A device's name must be less than 15 characters. Names exceeding the maximum
-will be truncated. This is a limitation of the Linux kernel network-device
-structure.
-
-**mac_address**: *<MAC Address>*
-
-When specifying a MAC Address on a bond, this value will be assigned to the
-bond device and may be different from the MAC address of any of the underlying
-bond interfaces. Specifying a MAC Address is optional. If ``mac_address`` is
-not present, then the bond will use the MAC Address of one of the bond
-interfaces.
-
-.. note::
-
- MAC addresses must be strings. As MAC addresses which consist only of the
- digits 0-9 (i.e. no hex a-f) can be interpreted as a base 60 integer per
- the `YAML 1.1 spec`_, it is best practice to quote all MAC addresses to
- ensure they are parsed as strings regardless of value.
-
-.. _YAML 1.1 spec: https://yaml.org/type/int.html
-
-**bond_interfaces**: *<List of network device names>*
-
-The ``bond_interfaces`` key accepts a list of network device ``name`` values
-from the configuration. This list may be empty.
-
-**mtu**: *<MTU SizeBytes>*
-
-The MTU key represents a device's Maximum Transmission Unit, the largest size
-packet or frame, specified in octets (eight-bit bytes), that can be sent in a
-packet- or frame-based network. Specifying ``mtu`` is optional.
-
-.. note::
-
- The range of MTU values supported by a device is not known at configuration
- time. A value that is too large or too small for a device may be ignored by
- that device.
-
-**params**: *<Dictionary of key: value bonding parameter pairs>*
-
-The ``params`` key in a bond holds a dictionary of bonding parameters.
-This dictionary may be empty. For more details on what the various bonding
-parameters mean, please read the Linux kernel's Bonding.txt documentation.
-
-Valid ``params`` keys are:
-
- - ``active_slave``: Set bond attribute
- - ``ad_actor_key``: Set bond attribute
- - ``ad_actor_sys_prio``: Set bond attribute
- - ``ad_actor_system``: Set bond attribute
- - ``ad_aggregator``: Set bond attribute
- - ``ad_num_ports``: Set bond attribute
- - ``ad_partner_key``: Set bond attribute
- - ``ad_partner_mac``: Set bond attribute
- - ``ad_select``: Set bond attribute
- - ``ad_user_port_key``: Set bond attribute
- - ``all_slaves_active``: Set bond attribute
- - ``arp_all_targets``: Set bond attribute
- - ``arp_interval``: Set bond attribute
- - ``arp_ip_target``: Set bond attribute
- - ``arp_validate``: Set bond attribute
- - ``downdelay``: Set bond attribute
- - ``fail_over_mac``: Set bond attribute
- - ``lacp_rate``: Set bond attribute
- - ``lp_interval``: Set bond attribute
- - ``miimon``: Set bond attribute
- - ``mii_status``: Set bond attribute
- - ``min_links``: Set bond attribute
- - ``mode``: Set bond attribute
- - ``num_grat_arp``: Set bond attribute
- - ``num_unsol_na``: Set bond attribute
- - ``packets_per_slave``: Set bond attribute
- - ``primary``: Set bond attribute
- - ``primary_reselect``: Set bond attribute
- - ``queue_id``: Set bond attribute
- - ``resend_igmp``: Set bond attribute
- - ``slaves``: Set bond attribute
- - ``tlb_dynamic_lb``: Set bond attribute
- - ``updelay``: Set bond attribute
- - ``use_carrier``: Set bond attribute
- - ``xmit_hash_policy``: Set bond attribute
-
-**Bond Example**::
-
- network:
- version: 1
- config:
- # Simple network adapter
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- # 10G pair
- - type: physical
- name: gbe0
- mac_address: cd:11:22:33:44:00
- - type: physical
- name: gbe1
- mac_address: cd:11:22:33:44:02
- - type: bond
- name: bond0
- bond_interfaces:
- - gbe0
- - gbe1
- params:
- bond-mode: active-backup
-
-Bridge
-~~~~~~
-Type ``bridge`` requires the following keys:
-
-- ``name``: Set the name of the bridge.
-- ``bridge_interfaces``: Specify the ports of a bridge via their ``name``.
- This list may be empty.
-- ``params``: A dictionary of bridge parameters. For more details, please
- read the bridge-utils-interfaces manpage.
-
-Valid keys are:
-
- - ``bridge_ageing``: Set the bridge's ageing value.
- - ``bridge_bridgeprio``: Set the bridge device network priority.
- - ``bridge_fd``: Set the bridge's forward delay.
- - ``bridge_hello``: Set the bridge's hello value.
- - ``bridge_hw``: Set the bridge's MAC address.
- - ``bridge_maxage``: Set the bridge's maxage value.
- - ``bridge_maxwait``: Set how long network scripts should wait for the
- bridge to be up.
- - ``bridge_pathcost``: Set the cost of a specific port on the bridge.
- - ``bridge_portprio``: Set the priority of a specific port on the bridge.
- - ``bridge_ports``: List of devices that are part of the bridge.
- - ``bridge_stp``: Set spanning tree protocol on or off.
- - ``bridge_waitport``: Set amount of time in seconds to wait on specific
- ports to become available.
-
-
-**Bridge Example**::
-
- network:
- version: 1
- config:
- # Simple network adapter
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- # Second nic with Jumbo frames
- - type: physical
- name: jumbo0
- mac_address: aa:11:22:33:44:55
- mtu: 9000
- - type: bridge
- name: br0
- bridge_interfaces:
- - jumbo0
- params:
- bridge_ageing: 250
- bridge_bridgeprio: 22
- bridge_fd: 1
- bridge_hello: 1
- bridge_maxage: 10
- bridge_maxwait: 0
- bridge_pathcost:
- - jumbo0 75
- bridge_portprio:
- - jumbo0 28
- bridge_stp: 'off'
- bridge_waitport:
- - jumbo0 0
-
-
-VLAN
-~~~~
-Type ``vlan`` requires the following keys:
-
-- ``name``: Set the name of the VLAN.
-- ``vlan_link``: Specify the underlying link via its ``name``.
-- ``vlan_id``: Specify the VLAN numeric id.
-
-The following optional keys are supported:
-
-**mtu**: *<MTU SizeBytes>*
-
-The MTU key represents a device's Maximum Transmission Unit, the largest size
-packet or frame, specified in octets (eight-bit bytes), that can be sent in a
-packet- or frame-based network. Specifying ``mtu`` is optional.
-
-.. note::
-
- The range of MTU values supported by a device is not known at configuration
- time. A value that is too large or too small for a device may be ignored by
- that device.
-
-
-**VLAN Example**::
-
- network:
- version: 1
- config:
- # Physical interfaces.
- - type: physical
- name: eth0
- mac_address: c0:d6:9f:2c:e8:80
- # VLAN interface.
- - type: vlan
- name: eth0.101
- vlan_link: eth0
- vlan_id: 101
- mtu: 1500
-
-Nameserver
-~~~~~~~~~~
-
-Users can specify a ``nameserver`` type. Nameserver dictionaries include
-the following keys:
-
-- ``address``: List of IPv4 or IPv6 addresses of nameservers.
-- ``search``: List of hostnames to include in the resolv.conf search path.
-- ``interface``: Optional. Ties the nameserver definition to the specified
- interface. The value specified here must match the `name` of an interface
- defined in this config. If unspecified, this nameserver will be considered
- a global nameserver.
-
-**Nameserver Example**::
-
- network:
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- subnets:
- - type: static
- address: 192.168.23.14/27
- gateway: 192.168.23.1
- - type: nameserver
- interface: interface0 # Ties nameserver to interface0 only
- address:
- - 192.168.23.2
- - 8.8.8.8
- search:
- - exemplary
-
-
-
-Route
-~~~~~
-
-Users can include static routing information as well. A ``route`` dictionary
-has the following keys:
-
-- ``destination``: IPv4 network address with CIDR netmask notation.
-- ``gateway``: IPv4 gateway address with CIDR netmask notation.
-- ``metric``: Integer which sets the network metric value for this route.
-
-**Route Example**::
-
- network:
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- subnets:
- - type: static
- address: 192.168.23.14/24
- gateway: 192.168.23.1
- - type: route
- destination: 192.168.24.0/24
- gateway: 192.168.24.1
- metric: 3
-
-Subnet/IP
-~~~~~~~~~
-
-For any network device (one of the Config Types), users can define a list of
-``subnets`` containing IP configuration dictionaries. Multiple subnet
-entries will create interface aliases, allowing a single interface to use
-different IP configurations.
-
-Valid keys for ``subnets`` include the following:
-
-- ``type``: Specify the subnet type.
-- ``control``: Specify manual, auto or hotplug. Indicates how the interface
- will be handled during boot.
-- ``address``: IPv4 or IPv6 address. It may include CIDR netmask notation.
-- ``netmask``: IPv4 subnet mask in dotted format or CIDR notation.
-- ``gateway``: IPv4 address of the default gateway for this subnet.
-- ``dns_nameservers``: Specify a list of IPv4 dns server IPs to end up in
- resolv.conf.
-- ``dns_search``: Specify a list of search paths to be included in
- resolv.conf.
-- ``routes``: Specify a list of routes for a given interface
-
-
-Subnet types are one of the following:
-
-- ``dhcp4``: Configure this interface with IPv4 dhcp.
-- ``dhcp``: Alias for ``dhcp4``.
-- ``dhcp6``: Configure this interface with IPv6 dhcp.
-- ``static``: Configure this interface with a static IPv4.
-- ``static6``: Configure this interface with a static IPv6.
-- ``ipv6_dhcpv6-stateful``: Configure this interface with ``dhcp6``.
-- ``ipv6_dhcpv6-stateless``: Configure this interface with SLAAC and DHCP.
-- ``ipv6_slaac``: Configure address with SLAAC.
-
-When making use of ``dhcp`` or either of the ``ipv6_dhcpv6`` types,
-no additional configuration is needed in the subnet dictionary.
-
-Using ``ipv6_dhcpv6-stateless`` or ``ipv6_slaac`` allows the IPv6 address to be
-automatically configured with StateLess Address AutoConfiguration (`SLAAC`_).
-SLAAC requires support from the network, so verify that your cloud or network
-offering has support before trying it out. With ``ipv6_dhcpv6-stateless``,
-DHCPv6 is still used to fetch other subnet details such as gateway or DNS
-servers. If you only want to discover the address, use ``ipv6_slaac``.
-
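-For example, an interface could be configured for stateless DHCPv6 as shown
-below (an illustrative sketch following the same structure as the examples
-that follow).
-
-**Subnet IPv6 stateless Example**::
-
- network:
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- subnets:
- - type: ipv6_dhcpv6-stateless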
-
-**Subnet DHCP Example**::
-
- network:
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- subnets:
- - type: dhcp
-
-
-**Subnet Static Example**::
-
- network:
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- subnets:
- - type: static
- address: 192.168.23.14/27
- gateway: 192.168.23.1
- dns_nameservers:
- - 192.168.23.2
- - 8.8.8.8
- dns_search:
- - exemplary.maas
-
-The following will result in an ``interface0`` using DHCP and ``interface0:1``
-using the static subnet configuration.
-
-**Multiple subnet Example**::
-
- network:
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- subnets:
- - type: dhcp
- - type: static
- address: 192.168.23.14/27
- gateway: 192.168.23.1
- dns_nameservers:
- - 192.168.23.2
- - 8.8.8.8
- dns_search:
- - exemplary
-
-**Subnet with routes Example**::
-
- network:
- version: 1
- config:
- - type: physical
- name: interface0
- mac_address: '00:11:22:33:44:55'
- subnets:
- - type: dhcp
- - type: static
- address: 10.184.225.122
- netmask: 255.255.255.252
- routes:
- - gateway: 10.184.225.121
- netmask: 255.240.0.0
- network: 10.176.0.0
- - gateway: 10.184.225.121
- netmask: 255.240.0.0
- network: 10.208.0.0
-
-
-Multi-layered configurations
-----------------------------
-
-Complex networking sometimes uses layers of configuration. The syntax allows
-users to build those layers one at a time. All of the virtual network devices
-supported allow specifying an underlying device by their ``name`` value.
-
-**Bonded VLAN Example**::
-
- network:
- version: 1
- config:
- # 10G pair
- - type: physical
- name: gbe0
- mac_address: cd:11:22:33:44:00
- - type: physical
- name: gbe1
- mac_address: cd:11:22:33:44:02
- # Bond.
- - type: bond
- name: bond0
- bond_interfaces:
- - gbe0
- - gbe1
- params:
- bond-mode: 802.3ad
- bond-lacp-rate: fast
- # A Bond VLAN.
- - type: vlan
- name: bond0.200
- vlan_link: bond0
- vlan_id: 200
- subnets:
- - type: dhcp4
-
-More Examples
--------------
-Some more examples to explore the various options available.
-
-**Multiple VLAN example**::
-
- network:
- version: 1
- config:
- - id: eth0
- mac_address: d4:be:d9:a8:49:13
- mtu: 1500
- name: eth0
- subnets:
- - address: 10.245.168.16/21
- dns_nameservers:
- - 10.245.168.2
- gateway: 10.245.168.1
- type: static
- type: physical
- - id: eth1
- mac_address: d4:be:d9:a8:49:15
- mtu: 1500
- name: eth1
- subnets:
- - address: 10.245.188.2/24
- dns_nameservers: []
- type: static
- type: physical
- - id: eth1.2667
- mtu: 1500
- name: eth1.2667
- subnets:
- - address: 10.245.184.2/24
- dns_nameservers: []
- type: static
- type: vlan
- vlan_id: 2667
- vlan_link: eth1
- - id: eth1.2668
- mtu: 1500
- name: eth1.2668
- subnets:
- - address: 10.245.185.1/24
- dns_nameservers: []
- type: static
- type: vlan
- vlan_id: 2668
- vlan_link: eth1
- - id: eth1.2669
- mtu: 1500
- name: eth1.2669
- subnets:
- - address: 10.245.186.1/24
- dns_nameservers: []
- type: static
- type: vlan
- vlan_id: 2669
- vlan_link: eth1
- - id: eth1.2670
- mtu: 1500
- name: eth1.2670
- subnets:
- - address: 10.245.187.2/24
- dns_nameservers: []
- type: static
- type: vlan
- vlan_id: 2670
- vlan_link: eth1
- - address: 10.245.168.2
- search:
- - dellstack
- type: nameserver
-
-.. _SLAAC: https://tools.ietf.org/html/rfc4862
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
deleted file mode 100644
index b6d7a9be..00000000
--- a/doc/rtd/topics/network-config.rst
+++ /dev/null
@@ -1,326 +0,0 @@
-.. _network_config:
-
-*********************
-Network Configuration
-*********************
-
-- Default Behavior
-- Disabling Network Configuration
-- Fallback Networking
-- Network Configuration Sources
-- Network Configuration Outputs
-- Network Output Policy
-- Network Configuration Tools
-- Examples
-
-.. _default_behavior:
-
-Default Behavior
-================
-
-`Cloud-init`_ searches for network configuration in order of increasing
-precedence, with each item overriding the previous.
-
-**Datasource**
-
-For example, OpenStack may provide network config in the MetaData Service.
-
-**System Config**
-
-A ``network:`` entry in ``/etc/cloud/cloud.cfg.d/*`` configuration files.
-
-**Kernel Command Line**
-
-``ip=`` or ``network-config=<Base64 encoded YAML config string>``
-
-User-data cannot change an instance's network configuration. In the absence
-of network configuration in any of the above sources, `Cloud-init`_ will
-write out a network configuration that will issue a DHCP request on a "first"
-network interface.
-
-.. note::
-
- The network-config value is expected to be a Base64 encoded YAML string in
- :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally it
- can be compressed with ``gzip`` prior to Base64 encoding.
-
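-As an illustrative sketch using standard shell tools, a network config stored
-in a local file could be encoded (and optionally gzip-compressed) for the
-kernel command line like this, with the resulting string passed as
-``network-config=<value>``:
-
-.. code-block:: shell-session
-
- $ base64 -w0 network-config.yaml
- $ gzip -c network-config.yaml | base64 -w0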
-
-Disabling Network Configuration
-===============================
-
-Users may disable `Cloud-init`_ 's network configuration capability and rely
-on other methods, such as embedded configuration or other customizations.
-
-`Cloud-init`_ supports the following methods for disabling its network
-configuration.
-
-
-**Kernel Command Line**
-
-`Cloud-init`_ will additionally check for the kernel command line parameter
-``network-config=disabled``, which will automatically disable any network
-configuration.
-
-Example disabling kernel command line entry: ::
-
- network-config=disabled
-
-
-**cloud config**
-
-In the combined cloud-init configuration dictionary, merged from
-``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
-
- network:
- config: disabled
-
-If `Cloud-init`_ 's networking config has not been disabled, and
-no other network information is found, then it will proceed
-to generate a fallback networking configuration.
-
-Disabling Network Activation
-----------------------------
-
-Some datasources may not be initialized until after the network has been
-brought up. In this case, cloud-init will attempt to bring up the interfaces
-specified
-by the datasource metadata using a network activator discovered by
-`cloudinit.net.activators.select_activators`_.
-
-This behavior can be disabled in the cloud-init configuration dictionary,
-merged from ``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
-
- disable_network_activation: true
-
-Fallback Network Configuration
-==============================
-
-`Cloud-init`_ will attempt to determine which of any attached network devices
-is most likely to have a connection and then generate a network
-configuration to issue a DHCP request on that interface.
-
-`Cloud-init`_ runs during early boot and does not expect composed network
-devices (such as Bridges) to be available. `Cloud-init`_ does not consider
-the following interface devices as likely 'first' network interfaces for
-fallback configuration; they are filtered out from being selected.
-
-- **loopback**: ``name=lo``
-- **Virtual Ethernet**: ``name=veth*``
-- **Software Bridges**: ``type=bridge``
-- **Software VLANs**: ``type=vlan``
-
-
-`Cloud-init`_ prefers network interfaces that report being connected via the
-Linux ``carrier`` flag. If no interfaces are marked as connected, then all
-unfiltered interfaces are potential connections.
-
-Of the potential interfaces, `Cloud-init`_ will attempt to pick the "right"
-interface given the information it has available.
-
-Finally after selecting the "right" interface, a configuration is
-generated and applied to the system.
-
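-Conceptually, the result is equivalent to a single-interface DHCP
-configuration such as the following :ref:`network_config_v1` sketch (the
-interface name and details depend on the instance)::
-
- network:
- version: 1
- config:
- - type: physical
- name: eth0
- subnets:
- - type: dhcp
-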
-.. note::
-
- PhotonOS disables fallback networking configuration by default, leaving the
- network unrendered when no other network config is provided.
- If fallback config is still desired on PhotonOS, it can be enabled by
- providing ``disable_fallback_netcfg: false`` in
- ``/etc/cloud/cloud.cfg:sys_config`` settings.
-
-Network Configuration Sources
-=============================
-
-`Cloud-init`_ accepts a number of different network configuration formats in
-support of different cloud substrates. The Datasource for these clouds in
-`Cloud-init`_ will detect and consume Datasource-specific network
-configuration formats for use when writing an instance's network
-configuration.
-
-The following Datasources optionally provide network configuration:
-
-- :ref:`datasource_config_drive`
-
- - `OpenStack Metadata Service Network`_
- - :ref:`network_config_eni`
-
-- :ref:`datasource_digital_ocean`
-
- - `DigitalOcean JSON metadata`_
-
-- :ref:`datasource_nocloud`
-
- - :ref:`network_config_v1`
- - :ref:`network_config_v2`
- - :ref:`network_config_eni`
-
-- :ref:`datasource_opennebula`
-
- - :ref:`network_config_eni`
-
-- :ref:`datasource_openstack`
-
- - :ref:`network_config_eni`
- - `OpenStack Metadata Service Network`_
-
-- :ref:`datasource_smartos`
-
- - `SmartOS JSON Metadata`_
-
-- :ref:`datasource_upcloud`
-
- - `UpCloud JSON metadata`_
-
-- :ref:`datasource_vultr`
-
- - `Vultr JSON metadata`_
-
-For more information on network configuration formats, see:
-
-.. toctree::
- :maxdepth: 1
-
- network-config-format-eni.rst
- network-config-format-v1.rst
- network-config-format-v2.rst
-
-
-Network Configuration Outputs
-=============================
-
-`Cloud-init`_ converts various forms of user-supplied or automatically
-generated configuration into an internal network configuration state. From
-this state, `Cloud-init`_ delegates rendering of the configuration to
-distro-supported formats. The following ``renderers`` are supported in
-cloud-init:
-
-- **NetworkManager**
-
-`NetworkManager <https://networkmanager.dev>`_ is the standard Linux network
-configuration tool suite. It supports a wide range of networking setups.
-Configuration is typically stored in ``/etc/NetworkManager``.
-
-It is the default for a number of Linux distributions, notably Fedora,
-CentOS/RHEL, and their derivatives.
-
-- **ENI**
-
-``/etc/network/interfaces``, or ``ENI``, is supported by the ``ifupdown``
-package found in Alpine Linux, Debian and Ubuntu.
-
-- **Netplan**
-
-Introduced in Ubuntu 16.10 (Yakkety Yak), `netplan <https://netplan.io/>`_ has
-been the default network configuration tool in Ubuntu since 17.10 (Artful
-Aardvark). netplan consumes :ref:`network_config_v2` input and renders
-network configuration for supported backends such as ``systemd-networkd`` and
-``NetworkManager``.
-
-- **Sysconfig**
-
-Sysconfig format is used by RHEL, CentOS, Fedora and other derivatives.
-
-
-- **NetBSD, OpenBSD, FreeBSD**
-
-These network renderers support BSD releases, which typically write
-configuration to ``/etc/rc.conf``. Unique to the BSD renderers is that each
-one also calls something akin to `FreeBSD.start_services`_, which invokes the
-applicable network services to set up the network, making network activators
-unneeded for BSD flavors at the moment.
-
-
-Network Output Policy
-=====================
-
-The default policy for selecting a network ``renderer`` in order of preference
-is as follows:
-
-- ENI
-- Sysconfig
-- Netplan
-- NetworkManager
-- FreeBSD
-- NetBSD
-- OpenBSD
-- Networkd
-
-The default policy for selecting a network ``activator`` in order of
-preference is as follows:
-
-- ENI: using ``ifup`` and ``ifdown`` to manage device setup/teardown
-- Netplan: using ``netplan apply`` to manage device setup/teardown
-- NetworkManager: using ``nmcli`` to manage device setup/teardown
-- Networkd: using ``ip`` to manage device setup/teardown
-
-
-When applying the policy, `Cloud-init`_ checks if the current instance has the
-correct binaries and paths to support the renderer. The first renderer that
-can be used is selected. Users may override the network renderer policy by
-supplying an updated configuration in cloud-config. ::
-
- system_info:
- network:
- renderers: ['netplan', 'network-manager', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd']
- activators: ['eni', 'netplan', 'network-manager', 'networkd']
-
-
-Network Configuration Tools
-===========================
-
-`Cloud-init`_ contains one tool used to test input/output conversion between
-formats. The ``tools/net-convert.py`` script in the `Cloud-init`_ source
-repository is helpful for examining the expected output for a given input
-format.
-
-CLI Interface:
-
-.. code-block:: shell-session
-
- % tools/net-convert.py --help
- usage: net-convert.py [-h] --network-data PATH --kind
- {eni,network_data.json,yaml} -d PATH [-m name,mac]
- --output-kind {eni,netplan,sysconfig}
-
- optional arguments:
- -h, --help show this help message and exit
- --network-data PATH, -p PATH
- --kind {eni,network_data.json,yaml}, -k {eni,network_data.json,yaml}
- -d PATH, --directory PATH
- directory to place output in
- -m name,mac, --mac name,mac
- interface name to mac mapping
- --output-kind {eni,netplan,sysconfig}, -ok {eni,netplan,sysconfig}
-
-
-Example output converting V2 to sysconfig:
-
-.. code-block:: shell-session
-
- % tools/net-convert.py --network-data v2.yaml --kind yaml \
- --output-kind sysconfig -d target
- % cat target/etc/sysconfig/network-scripts/ifcfg-eth*
- # Created by cloud-init on instance boot automatically, do not edit.
- #
- BOOTPROTO=static
- DEVICE=eth7
- IPADDR=192.168.1.5/255.255.255.0
- NM_CONTROLLED=no
- ONBOOT=yes
- TYPE=Ethernet
- USERCTL=no
- # Created by cloud-init on instance boot automatically, do not edit.
- #
- BOOTPROTO=dhcp
- DEVICE=eth9
- NM_CONTROLLED=no
- ONBOOT=yes
- TYPE=Ethernet
- USERCTL=no
-
-
-.. _Cloud-init: https://launchpad.net/cloud-init
-.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/
-.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
-.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
-.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
-.. _Vultr JSON metadata: https://www.vultr.com/metadata/
-.. _cloudinit.net.activators.select_activators: https://github.com/canonical/cloud-init/blob/main/cloudinit/net/activators.py#L279
-.. _FreeBSD.start_services: https://github.com/canonical/cloud-init/blob/main/cloudinit/net/freebsd.py#L28
-
-.. vi: textwidth=79
diff --git a/doc/rtd/topics/tutorial.rst b/doc/rtd/topics/tutorial.rst
deleted file mode 100644
index e8bed272..00000000
--- a/doc/rtd/topics/tutorial.rst
+++ /dev/null
@@ -1,141 +0,0 @@
-.. _lxd_tutorial:
-
-Tutorial
-********
-
-In this tutorial, we will create our first cloud-init user data script
-and deploy it into an LXD container. We'll be using LXD_ for this tutorial
-because it provides first class support for cloud-init user data as well as
-systemd support. Because it is container based, it allows for quick
-testing and iterating on our user data definition.
-
-Setup LXD
-=========
-
-Skip this section if you already have LXD_ setup.
-
-Install LXD
------------
-
-.. code-block:: shell-session
-
- $ sudo snap install lxd
-
-If you don't have snap, you can install LXD using one of the
-`other installation options`_.
-
-Initialize LXD
---------------
-
-.. code-block:: shell-session
-
- $ lxd init --minimal
-
-The minimal configuration should work fine for our purposes. It can always
-be changed at a later time if needed.
-
-Define our user data
-====================
-
-Now that LXD is set up, we can define our user data. Create the
-following file on your local filesystem at ``/tmp/my-user-data``:
-
-.. code-block:: yaml
-
- #cloud-config
- runcmd:
- - echo 'Hello, World!' > /var/tmp/hello-world.txt
-
-Here we are defining our cloud-init user data in the
-:ref:`cloud-config<topics/format:Cloud Config Data>` format, using the
-`runcmd`_ module to define a command to run. When applied, it
-should write ``Hello, World!`` to ``/var/tmp/hello-world.txt``.
-
-Launch a container with our user data
-=====================================
-
-Now that we have LXD set up and our user data defined, we can launch an
-instance with our user data:
-
-.. code-block:: shell-session
-
- $ lxc launch ubuntu:focal my-test --config=user.user-data="$(cat /tmp/my-user-data)"
-
-Verify that cloud-init ran successfully
-=======================================
-
-After launching the container, we should be able to connect
-to our instance using
-
-.. code-block:: shell-session
-
- $ lxc shell my-test
-
-You should now be in a shell inside the LXD instance.
-Before validating the user data, let's wait for cloud-init to complete
-successfully:
-
-.. code-block:: shell-session
-
- $ cloud-init status --wait
- .....
- cloud-init status: done
- $
-
-We can now verify that cloud-init received the expected user data:
-
-.. code-block:: shell-session
-
- $ cloud-init query userdata
- #cloud-config
- runcmd:
- - echo 'Hello, World!' > /var/tmp/hello-world.txt
-
-We can also assert the user data we provided is a valid cloud-config:
-
-.. code-block:: shell-session
-
- $ cloud-init schema --system --annotate
- Valid cloud-config: system userdata
- $
-
-Finally, verify that our user data was applied successfully:
-
-.. code-block:: shell-session
-
- $ cat /var/tmp/hello-world.txt
- Hello, World!
- $
-
-We can see that cloud-init has consumed our user data successfully!
-
-Tear down
-=========
-
-Exit the container shell (i.e., using ``exit`` or ctrl-d). Once we have
-exited the container, we can stop the container using:
-
-.. code-block:: shell-session
-
- $ lxc stop my-test
-
-and we can remove the container using:
-
-.. code-block:: shell-session
-
- $ lxc rm my-test
-
-What's next?
-============
-
-In this tutorial, we used the runcmd_ module to execute a shell command.
-The full list of modules available can be found in
-:ref:`modules documentation<modules>`.
-Each module contains examples of how to use it.
-
-You can also head over to the :ref:`examples<yaml_examples>` page for
-examples of more common use cases.
-
-.. _LXD: https://linuxcontainers.org/lxd/
-.. _other installation options: https://linuxcontainers.org/lxd/getting-started-cli/#other-installation-options
-.. _runcmd: https://cloudinit.readthedocs.io/en/latest/topics/modules.html#runcmd
diff --git a/doc/rtd/topics/vendordata.rst b/doc/rtd/topics/vendordata.rst
deleted file mode 100644
index 6ef6b74b..00000000
--- a/doc/rtd/topics/vendordata.rst
+++ /dev/null
@@ -1,73 +0,0 @@
-.. _vendordata:
-
-***********
-Vendor Data
-***********
-
-Overview
-========
-
-Vendordata is data provided by the entity that launches an instance
-(for example, the cloud provider). This data can be used to
-customize the image to fit into the particular environment it is
-being run in.
-
-Vendordata follows the same rules as user-data, with the following
-caveats:
-
- 1. Users have ultimate control over vendordata. They can disable its
- execution or disable handling of specific parts of multipart input.
- 2. By default it only runs on first boot.
- 3. Vendordata can be disabled by the user. If the use of vendordata is
- required for the instance to run, then vendordata should not be used.
- 4. User-supplied cloud-config is merged over cloud-config from vendordata.
-
-Users providing cloud-config data can use the '#cloud-config-jsonp' method to
-more finely control their modifications to the vendor supplied cloud-config.
-For example, if both vendor and user have provided 'runcmd' then the default
-merge handler will cause the user's runcmd to override the one provided by the
-vendor. To append to 'runcmd', the user could better provide multipart input
-with a cloud-config-jsonp part like:
-
-.. code:: yaml
-
- #cloud-config-jsonp
- [{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}]
-
-Further, we strongly advise vendors to not 'be evil'. By evil, we
-mean any action that could compromise a system. Since users trust
-you, please take care to make sure that any vendordata is safe,
-atomic, idempotent and does not put your users at risk.
-
-Input Formats
-=============
-
-cloud-init will download and cache to the filesystem any vendor-data that it
-finds. Vendordata is handled exactly like user-data. That means that the
-vendor can supply multipart input and have those parts acted on in the same
-way as user-data.
-
-The only differences are:
-
- * vendor-data-defined scripts are stored in a different location than
- user-data-defined scripts (to avoid namespace collision)
- * users can disable part handlers via cloud-config settings.
- For example, to disable handling of 'part-handlers' in vendor-data,
- the user could provide user-data like this:
-
- .. code:: yaml
-
- #cloud-config
- vendordata: {excluded: 'text/part-handler'}
-
-Examples
-========
-There are examples in the examples subdirectory.
-
-Additionally, the 'tools' directory contains 'write-mime-multipart',
-which can be used to easily generate mime-multi-part files from a list
-of input files. That data can then be given to an instance.
-
-See 'write-mime-multipart --help' for usage.
-
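-A typical invocation looks something like the following sketch (the file names
-and MIME types here are illustrative; check ``--help`` for the exact options
-supported by your version):
-
-.. code:: shell-session
-
- $ write-mime-multipart --output=combined-userdata.txt \
- cloud-config.yaml:text/cloud-config \
- install-script.sh:text/x-shellscript
-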
-.. vi: textwidth=79
diff --git a/doc/rtd/tutorial/index.rst b/doc/rtd/tutorial/index.rst
new file mode 100644
index 00000000..392d2465
--- /dev/null
+++ b/doc/rtd/tutorial/index.rst
@@ -0,0 +1,37 @@
+.. _tutorial_index:
+
+Tutorials
+*********
+
+This section contains step-by-step tutorials to help you get started with
+``cloud-init``. We hope our tutorials make as few assumptions as possible and
+are accessible to anyone with an interest in ``cloud-init``. They should be a
+great place to start learning about ``cloud-init``, how it works, and what it's
+capable of.
+
+-----
+
+Core tutorial
+=============
+
+This tutorial, which we recommend if you are completely new to ``cloud-init``,
+uses the QEMU emulator to introduce you to all of the key concepts, tools,
+processes and operations that you will need to get started.
+
+.. toctree::
+ :maxdepth: 1
+
+ qemu.rst
+
+Quick-start tutorial
+====================
+
+This tutorial is recommended if you have some familiarity with ``cloud-init``
+or the concepts around it, and are looking to get started as quickly as
+possible. Here, you will use an LXD container to deploy a ``cloud-init``
+user data script.
+
+.. toctree::
+ :maxdepth: 1
+
+ lxd.rst
diff --git a/doc/rtd/tutorial/lxd.rst b/doc/rtd/tutorial/lxd.rst
new file mode 100644
index 00000000..33866a21
--- /dev/null
+++ b/doc/rtd/tutorial/lxd.rst
@@ -0,0 +1,175 @@
+.. _tutorial_lxd:
+
+Quick-start tutorial with LXD
+*****************************
+
+In this tutorial, we will create our first ``cloud-init`` user data script
+and deploy it into an `LXD`_ container.
+
+Why LXD?
+========
+
+We'll be using LXD for this tutorial because it provides first class support
+for ``cloud-init`` user data, as well as ``systemd`` support. Because it is
+container based, it allows us to quickly test and iterate upon our user data
+definition.
+
+How to use this tutorial
+========================
+
+In this tutorial, the commands in each code block can be copied and pasted
+directly into the terminal. Omit the prompt (``$``) before each command, or
+use the "copy code" button on the right-hand side of the block, which will copy
+the command for you without the prompt.
+
+Each code block is preceded by a description of what the command does, and
+followed by an example of the type of output you should expect to see.
+
+Install and initialise LXD
+==========================
+
+If you already have LXD set up, you can skip this section. Otherwise, let's
+install LXD:
+
+.. code-block:: shell-session
+
+ $ sudo snap install lxd
+
+If you don't have snap, you can install LXD using one of the
+`other installation options`_.
+
+Now we need to initialise LXD. The minimal configuration will be enough for
+the purposes of this tutorial. If you need to, you can always change the
+configuration at a later time.
+
+.. code-block:: shell-session
+
+ $ lxd init --minimal
+
+Define our user data
+====================
+
+Now that LXD is set up, we can define our user data. Create the
+following file on your local filesystem at :file:`/tmp/my-user-data`:
+
+.. code-block:: yaml
+
+ #cloud-config
+ runcmd:
+ - echo 'Hello, World!' > /var/tmp/hello-world.txt
+
+Here, we are defining our ``cloud-init`` user data in the
+:ref:`#cloud-config<user_data_formats>` format, using the
+:ref:`runcmd module <mod-runcmd>` to define a command to run. When applied, it
+will write ``Hello, World!`` to :file:`/var/tmp/hello-world.txt` (as we shall
+see later!).
+
+Launch an LXD container with our user data
+==========================================
+
+Now that we have LXD set up and our user data defined, we can launch an
+instance with our user data:
+
+.. code-block:: shell-session
+
+ $ lxc launch ubuntu:focal my-test --config=user.user-data="$(cat /tmp/my-user-data)"
+
+Verify that ``cloud-init`` ran successfully
+-------------------------------------------
+
+After launching the container, we should be able to connect to our instance
+using:
+
+.. code-block:: shell-session
+
+ $ lxc shell my-test
+
+You should now be in a shell inside the LXD instance.
+
+Before validating the user data, let's wait for ``cloud-init`` to complete
+successfully:
+
+.. code-block:: shell-session
+
+ $ cloud-init status --wait
+
+Which provides the following output:
+
+.. code-block::
+
+ status: done
+
+Verify our user data
+--------------------
+
+Now we know that ``cloud-init`` has been successfully run, we can verify that
+it received the expected user data we provided earlier:
+
+.. code-block:: shell-session
+
+ $ cloud-init query userdata
+
+Which should print the following to the terminal window:
+
+.. code-block::
+
+ #cloud-config
+ runcmd:
+ - echo 'Hello, World!' > /var/tmp/hello-world.txt
+
+We can also assert the user data we provided is a valid cloud-config:
+
+.. code-block:: shell-session
+
+ $ cloud-init schema --system --annotate
+
+Which should print the following:
+
+.. code-block::
+
+ Valid cloud-config: system userdata
+
+Finally, let us verify that our user data was applied successfully:
+
+.. code-block:: shell-session
+
+ $ cat /var/tmp/hello-world.txt
+
+Which should then print:
+
+.. code-block::
+
+ Hello, World!
+
+We can see that ``cloud-init`` has received and consumed our user data
+successfully!
+
+Tear down
+=========
+
+Exit the container shell (by typing :command:`exit` or pressing :kbd:`ctrl-d`).
+Once we have exited the container, we can stop the container using:
+
+.. code-block:: shell-session
+
+ $ lxc stop my-test
+
+We can then remove the container completely using:
+
+.. code-block:: shell-session
+
+ $ lxc rm my-test
+
+What's next?
+============
+
+In this tutorial, we used the :ref:`runcmd module <mod-runcmd>` to execute a
+shell command. The full list of modules available can be found in our
+:ref:`modules documentation<modules>`.
+Each module contains examples of how to use it.
+
+You can also head over to the :ref:`examples page<yaml_examples>` for
+examples of more common use cases.
+
+.. _LXD: https://linuxcontainers.org/lxd/
+.. _other installation options: https://linuxcontainers.org/lxd/getting-started-cli/#other-installation-options
diff --git a/doc/rtd/tutorial/qemu-debugging.rst b/doc/rtd/tutorial/qemu-debugging.rst
new file mode 100644
index 00000000..08d4c4c5
--- /dev/null
+++ b/doc/rtd/tutorial/qemu-debugging.rst
@@ -0,0 +1,41 @@
+.. _qemu_debug_info:
+
+QEMU tutorial debugging
+***********************
+
+You may wish to test out the commands in this tutorial as a
+:download:`script<qemu-script.sh>` to check for copy-paste mistakes.
+
+If you successfully launched the virtual machine, but couldn't log in,
+there are a few places to check to debug your setup.
+
+To debug, answer the following questions:
+
+Did ``cloud-init`` discover the IMDS webserver?
+===============================================
+
+The webserver should print a message in the terminal for each request it
+receives. If it didn't print out any messages when the virtual machine booted,
+then ``cloud-init`` was unable to obtain the config. Make sure that the
+webserver can be locally accessed using :command:`curl` or :command:`wget`.
+
+.. code-block:: sh
+
+ $ curl 0.0.0.0:8000/user-data
+ $ curl 0.0.0.0:8000/meta-data
+ $ curl 0.0.0.0:8000/vendor-data
+
+Did the IMDS webserver serve the expected files?
+================================================
+
+If the webserver prints out ``404`` errors when launching QEMU, then check
+that you started the server in the temp directory.
+
+Were the configurations inside the file correct?
+================================================
+
+When launching QEMU, if the webserver shows that it succeeded in serving
+:file:`user-data`, :file:`meta-data` and :file:`vendor-data`, but you cannot
+log in, then you may have provided incorrect cloud-config files. If you can
+mount a copy of the virtual machine's filesystem locally to inspect the logs,
+it should be possible to get clues about what went wrong.
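+
+One possible approach (an illustrative sketch using ``qemu-nbd``; the device
+and partition names may differ on your system) is to attach the image as a
+network block device and mount its root partition:
+
+.. code-block:: sh
+
+ $ sudo modprobe nbd max_part=8
+ $ sudo qemu-nbd --connect=/dev/nbd0 jammy-server-cloudimg-amd64.img
+ $ sudo mount /dev/nbd0p1 /mnt
+ $ less /mnt/var/log/cloud-init.log
+ $ sudo umount /mnt && sudo qemu-nbd --disconnect /dev/nbd0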
diff --git a/doc/rtd/tutorial/qemu-script.sh b/doc/rtd/tutorial/qemu-script.sh
new file mode 100755
index 00000000..19a2cf85
--- /dev/null
+++ b/doc/rtd/tutorial/qemu-script.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+TEMP_DIR=temp
+IMAGE_URL="https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
+
+# setup
+mkdir "$TEMP_DIR" && cd "$TEMP_DIR" || {
+ echo "Error: Failed to create directory [$TEMP_DIR], aborting early"
+ exit 1
+}
+
+wget "$IMAGE_URL"
+
+# Create user-data, vendor-data, meta-data
+cat << EOF > user-data
+#cloud-config
+password: password
+chpasswd:
+ expire: False
+EOF
+
+cat << EOF > meta-data
+instance-id: someid/somehostname
+local-hostname: jammy
+EOF
+
+touch vendor-data
+
+# start ad hoc imds webserver
+python3 -m http.server --directory . &
+
+# start an instance of your image in a virtual machine
+qemu-system-x86_64 \
+ -net nic \
+ -net user \
+ -machine accel=kvm:tcg \
+ -cpu host \
+ -m 512 \
+ -nographic \
+ -hda jammy-server-cloudimg-amd64.img \
+ -smbios type=1,serial=ds='nocloud-net;s=http://10.0.2.2:8000/'
+
+echo -e "\nTo reuse the image and config files, start the python webserver and "
+echo -e "virtual machine from $(pwd), which contains these files:\n$(ls -1)\n"
+
+# end the python server on exit
+trap "trap - SIGTERM && kill -- -$$" EXIT
diff --git a/doc/rtd/tutorial/qemu.rst b/doc/rtd/tutorial/qemu.rst
new file mode 100644
index 00000000..4370051a
--- /dev/null
+++ b/doc/rtd/tutorial/qemu.rst
@@ -0,0 +1,292 @@
+.. _tutorial_qemu:
+
+Core tutorial with QEMU
+***********************
+
+.. toctree::
+ :titlesonly:
+ :hidden:
+
+ qemu-debugging.rst
+
+In this tutorial, we will launch an Ubuntu cloud image in a virtual machine
+that uses ``cloud-init`` to pre-configure the system during boot.
+
+The goal of this tutorial is to provide a minimal demonstration of
+``cloud-init``, which you can then use as a development environment to test
+your ``cloud-init`` configurations locally before launching to the cloud.
+
+Why QEMU?
+=========
+
+`QEMU`_ is a cross-platform emulator capable of running performant virtual
+machines. QEMU is used at the core of a broad range of production operating
+system deployments and open source software projects (including libvirt, LXD,
+and vagrant) and is capable of running Windows, Linux, and Unix guest operating
+systems. While QEMU is flexible and feature-rich, we are using it because of
+its broad adoption and its ability to run on \*nix-derived operating systems.
+
+How to use this tutorial
+========================
+
+In this tutorial, the commands in each code block can be copied and pasted
+directly into the terminal. Omit the prompt (``$``) before each command, or
+use the "copy code" button on the right-hand side of the block, which will copy
+the command for you without the prompt.
+
+Each code block is preceded by a description of what the command does, and
+followed by an example of the type of output you should expect to see.
+
+Install QEMU
+============
+
+.. code-block:: sh
+
+ $ sudo apt install qemu-system-x86
+
+If you are not using Ubuntu, you can visit QEMU's `install instructions`_ for
+additional information.
+
+Create a temporary directory
+============================
+
+This directory will store our cloud image and configuration files for
+:ref:`user data<user_data_formats>`, :ref:`metadata<instance_metadata>`, and
+:ref:`vendor data<vendordata>`.
+
+You should run all commands from this temporary directory. If you run the
+commands from anywhere else, your virtual machine will not be configured.
+
+Let's create a temporary directory and make it our current working directory
+with :command:`cd`:
+
+.. code-block:: sh
+
+ $ mkdir temp
+ $ cd temp
+
+Download a cloud image
+======================
+
+Cloud images typically come with ``cloud-init`` pre-installed and configured to
+run on first boot. You will not need to worry about installing ``cloud-init``
+for now, since we are not manually creating our own image in this tutorial.
+
+In our case, we want to select the latest Ubuntu LTS_. Let's download the
+server image using :command:`wget`:
+
+.. code-block:: sh
+
+ $ wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
+
+Define our user data
+====================
+
+Now we need to create our :file:`user-data` file. This user data cloud-config
+sets the password of the default user, and sets that password to never expire.
+For more details you can refer to the
+:ref:`Set Passwords module page<mod-set_passwords>`.
+
+Run the following command, which creates a file named :file:`user-data`
+containing our configuration data.
+
+.. code-block:: sh
+
+ $ cat << EOF > user-data
+ #cloud-config
+ password: password
+ chpasswd:
+ expire: False
+
+ EOF
+
+What is user data?
+==================
+
+Before moving forward, let's inspect our :file:`user-data` file.
+
+.. code-block:: sh
+
+ $ cat user-data
+
+You should see the following contents:
+
+.. code-block:: yaml
+
+ #cloud-config
+ password: password
+ chpasswd:
+ expire: False
+
+The first line starts with ``#cloud-config``, which tells ``cloud-init``
+what type of user data is in the config. Cloud-config is a YAML-based
+configuration type that tells ``cloud-init`` how to configure the virtual
+machine instance. Multiple different format types are supported by
+``cloud-init``. For more information, see the
+:ref:`documentation describing different formats<user_data_formats>`.
+
+The second line, ``password: password``, as per
+:ref:`the Users and Groups module docs<mod-users_groups>`, sets the default
+user's password to ``password``.
+
+The third and fourth lines direct ``cloud-init`` to not require a password
+reset on first login.
+
+Define our metadata
+===================
+
+Now let's run the following command, which creates a file named
+:file:`meta-data` containing configuration data.
+
+.. code-block:: sh
+
+ $ cat << EOF > meta-data
+ instance-id: someid/somehostname
+ local-hostname: jammy
+
+ EOF
+
+Define our vendor data
+======================
+
+Now we will create the empty file :file:`vendor-data` in our temporary
+directory. This will speed up the retry wait time.
+
+.. code-block:: sh
+
+ $ touch vendor-data
+
+
+Start an ad hoc IMDS webserver
+==============================
+
+Open up a second terminal window, change to your temporary directory and then
+start the built-in Python webserver:
+
+.. code-block:: sh
+
+ $ cd temp
+ $ python3 -m http.server --directory .
+
+What is an IMDS?
+----------------
+
+Instance Metadata Service (IMDS) is a service provided by most cloud providers
+as a means of providing information to virtual machine instances. It is used
+for many different things, and is the primary mechanism for some clouds to
+expose ``cloud-init`` configuration data to the instance.
+
+How does ``cloud-init`` use the IMDS?
+-------------------------------------
+
+The IMDS uses a private HTTP webserver to provide metadata to each operating
+system instance. During early boot, ``cloud-init`` sets up network access and
+queries this webserver to gather configuration data. This allows ``cloud-init``
+to configure your operating system while it boots.
+
+In this tutorial we are emulating this workflow using QEMU and a simple Python
+webserver. This workflow is suitable for developing and testing
+``cloud-init`` configurations prior to cloud deployments.
+
+Launch a virtual machine with our user data
+===========================================
+
+Switch back to your original terminal, and run the following command so we can
+launch our virtual machine. By default, QEMU will print the kernel logs and
+``systemd`` logs to the terminal while the operating system boots. This may
+take a few moments to complete.
+
+.. code-block:: sh
+
+ $ qemu-system-x86_64 \
+ -net nic \
+ -net user \
+ -machine accel=kvm:tcg \
+ -cpu host \
+ -m 512 \
+ -nographic \
+ -hda jammy-server-cloudimg-amd64.img \
+ -smbios type=1,serial=ds='nocloud-net;s=http://10.0.2.2:8000/'
+
+.. note::
+ If the output stopped scrolling but you don't see a prompt yet, press
+ :kbd:`Enter` to get to the login prompt.
+
+How is QEMU configured for ``cloud-init``?
+------------------------------------------
+
+When launching QEMU, our machine configuration is specified on the command
+line. Many things may be configured: memory size, graphical output, networking
+information, hard drives and more.
+
+Let us examine the final two lines of our previous command. The first of them,
+:command:`-hda jammy-server-cloudimg-amd64.img`, tells QEMU to use the cloud
+image as a virtual hard drive. This will cause the virtual machine to
+boot Ubuntu, which already has ``cloud-init`` installed.
+
+The second line tells ``cloud-init`` where it can find user data, using the
+:ref:`NoCloud datasource<datasource_nocloud>`. During boot, ``cloud-init``
+checks the ``SMBIOS`` serial number for ``ds=nocloud-net``. If found,
+``cloud-init`` will use the specified URL to source its user data config files.
+
+In this case, we use the default gateway of the virtual machine (``10.0.2.2``)
+and default port number of the Python webserver (``8000``), so that
+``cloud-init`` will, inside the virtual machine, query the server running on
+the host.
+
+Verify that ``cloud-init`` ran successfully
+===========================================
+
+After launching the virtual machine, we should be able to connect to our
+instance using the default distro username.
+
+In this case the default username is ``ubuntu`` and the password we configured
+is ``password``.
+
+If you can log in using the configured password, it worked!
+
+If you couldn't log in, see
+:ref:`this page for debug information<qemu_debug_info>`.
+
+Check ``cloud-init`` status
+===========================
+
+Run the following command, which will allow us to check if ``cloud-init`` has
+finished running:
+
+.. code-block:: sh
+
+ $ cloud-init status --wait
+
+If you see ``status: done`` in the output, it succeeded!
+
+If you see a failed status, you'll want to check
+:file:`/var/log/cloud-init.log` for warning/error messages.
+
+Tear down
+=========
+
+In our main terminal, let's exit the QEMU shell using :kbd:`ctrl-a x` (that's
+:kbd:`ctrl` and :kbd:`a` simultaneously, followed by :kbd:`x`).
+
+In the second terminal, where the Python webserver is running, we can stop the
+server using :kbd:`ctrl-c`.
+
+What's next?
+============
+
+In this tutorial, we configured the default user's password and ran
+``cloud-init`` inside our QEMU virtual machine.
+
+The full list of modules available can be found in
+:ref:`our modules documentation<modules>`.
+The documentation for each module contains examples of how to use it.
+
+You can also head over to the :ref:`examples page<yaml_examples>` for
+examples of more common use cases.
+
+.. _QEMU: https://www.qemu.org
+.. _install instructions: https://www.qemu.org/download/#linux
+.. _LTS: https://wiki.ubuntu.com/Releases
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 1056f0e2..03d4fc25 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@d76228e24d400937ba99cdb516460dd757dd3348
+pycloudlib==1!1
pytest
diff --git a/packages/README.md b/packages/README.md
index 61681a2b..ee75fa5a 100644
--- a/packages/README.md
+++ b/packages/README.md
@@ -2,9 +2,24 @@
Package builders under this folder are development only templates. Do not rely on them.
-Downstream packaging resources:
+## Build/Install
+Cloud-init's build/install procedure is not OS/distro independent, as
+cloud-init is tightly coupled to OS implementation details such as the init
+units' definitions; see [systemd/](systemd/) and [sysvinit/](sysvinit/).
+
+For users interested in trying out cloud-init, a pre-built image is the easiest option.
+
+For users interested in packaging cloud-init, see the reference implementations under this folder
+and official packages in the following section.
+
+## Downstream packaging resources
+
+* [arch](https://archlinux.org/packages/community/any/cloud-init/)
+* [alpine](https://pkgs.alpinelinux.org/packages?name=cloud-init)
* [debian](https://packages.debian.org/sid/cloud-init)
* [fedora](https://src.fedoraproject.org/rpms/cloud-init)
+* [freebsd](https://www.freshports.org/net/cloud-init/) [devel package](https://www.freshports.org/net/cloud-init-devel)
+* [gentoo](https://packages.gentoo.org/packages/app-emulation/cloud-init)
* [opensuse](https://build.opensuse.org/package/show/Cloud:Tools/cloud-init)
* [ubuntu](https://launchpad.net/cloud-init)
diff --git a/packages/bddeb b/packages/bddeb
index fdb541d4..44d82a78 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -100,8 +100,10 @@ def write_debian_folder(root, templ_data, cloud_util_deps):
requires.extend(['python3'] + reqs + test_reqs)
if templ_data['debian_release'] == 'xenial':
requires.append('python3-pytest-catchlog')
- elif templ_data['debian_release'] == 'impish':
- requires.remove('dh-systemd')
+ elif templ_data['debian_release'] in (
+ 'buster', 'xenial', 'bionic', 'focal'
+ ):
+ requires.append('dh-systemd')
templater.render_to_file(util.abs_join(find_root(),
'packages', 'debian', 'control.in'),
util.abs_join(deb_dir, 'control'),
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index 8ba27e85..4ee0982a 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -3,7 +3,6 @@
"build-requires" : [
"debhelper",
"dh-python",
- "dh-systemd",
"python3-debconf"
],
"renames" : {
@@ -59,12 +58,14 @@
},
"suse" : {
"renames" : {
+ "jinja2" : "python3-Jinja2",
+ "pyyaml" : "python3-PyYAML"
},
"build-requires" : [
"fdupes",
"filesystem",
- "python-devel",
- "python-setuptools"
+ "python3-devel",
+ "python3-setuptools"
],
"requires" : [
"iproute2",
diff --git a/pyproject.toml b/pyproject.toml
index d566b4a2..88b350b0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,10 +21,10 @@ module = [
"configobj",
"debconf",
"httplib",
- "httpretty",
"jsonpatch",
"netifaces",
"paramiko.*",
+ "pip.*",
"pycloudlib.*",
"responses",
"serial",
diff --git a/setup.py b/setup.py
index 470dd774..04aae5b2 100644
--- a/setup.py
+++ b/setup.py
@@ -73,7 +73,13 @@ def in_virtualenv():
def get_version():
cmd = [sys.executable, "tools/read-version"]
ver = subprocess.check_output(cmd)
- return ver.decode("utf-8").strip()
+ version = ver.decode("utf-8").strip()
+ # read-version can spit out something like 22.4-15-g7f97aee24
+ # which is invalid under PEP440. If we replace the first - with a +
+ # that should give us a valid version.
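+    # e.g. "22.4-15-g7f97aee24" -> "22.4+15-g7f97aee24", which parses as
+    # version 22.4 with the git describe info in a PEP 440 local segment.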
+ if "-" in version:
+ version = version.replace("-", "+", 1)
+ return version
def read_requires():
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index f8121e99..d71e3b89 100644
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -21,7 +21,7 @@ CLOUD_SYSTEM_TARGET="/usr/lib/systemd/system/cloud-init.target"
CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target"
{% endif %}
{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
- "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "virtuozzo"] %}
+ "miraclelinux", "openEuler", "OpenCloudOS", "openmandriva", "rhel", "rocky", "TencentOS", "virtuozzo"] %}
dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index a9e180ee..1b1f9a86 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -13,9 +13,12 @@ After=systemd-networkd-wait-online.service
After=networking.service
{% endif %}
{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
- "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "virtuozzo"] %}
+ "miraclelinux", "openEuler", "OpenCloudOS", "openmandriva", "rhel", "rocky",
+ "suse", "TencentOS", "virtuozzo"] %}
+
After=network.service
After=NetworkManager.service
+After=NetworkManager-wait-online.service
{% endif %}
{% if variant in ["suse"] %}
After=wicked.service
diff --git a/sysvinit/freebsd/cloudconfig b/sysvinit/freebsd/cloudconfig
index fb604f4d..13c47280 100755
--- a/sysvinit/freebsd/cloudconfig
+++ b/sysvinit/freebsd/cloudconfig
@@ -21,8 +21,8 @@ cloudconfig_start()
${command} modules --mode config
}
-load_rc_config $name
+load_rc_config 'cloudinit'
-: ${cloudconfig_enable="NO"}
+: ${cloudinit_enable="NO"}
run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudfinal b/sysvinit/freebsd/cloudfinal
index 72047653..76a584ec 100755
--- a/sysvinit/freebsd/cloudfinal
+++ b/sysvinit/freebsd/cloudfinal
@@ -21,8 +21,8 @@ cloudfinal_start()
${command} modules --mode final
}
-load_rc_config $name
+load_rc_config 'cloudinit'
-: ${cloudfinal_enable="NO"}
+: ${cloudinit_enable="NO"}
run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit
index d26f3d0f..679adf5d 100755
--- a/sysvinit/freebsd/cloudinit
+++ b/sysvinit/freebsd/cloudinit
@@ -21,7 +21,7 @@ cloudinit_start()
${command} init
}
-load_rc_config $name
+load_rc_config 'cloudinit'
: ${cloudinit_enable="NO"}
diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal
index cb67b4a2..d6c3579e 100755
--- a/sysvinit/freebsd/cloudinitlocal
+++ b/sysvinit/freebsd/cloudinitlocal
@@ -21,8 +21,8 @@ cloudlocal_start()
${command} init --local
}
-load_rc_config $name
+load_rc_config 'cloudinit'
-: ${cloudinitlocal_enable="NO"}
+: ${cloudinit_enable="NO"}
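+# Set cloudinit_enable="YES" in /etc/rc.conf to enable the cloud-init rc scripts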
run_rc_command "$1"
diff --git a/templates/chrony.conf.opensuse-leap.tmpl b/templates/chrony.conf.opensuse-leap.tmpl
new file mode 100644
index 00000000..a3d3e0ec
--- /dev/null
+++ b/templates/chrony.conf.opensuse-leap.tmpl
@@ -0,0 +1,38 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# In the first three updates, step the system clock instead of slewing it
+# if the adjustment is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/chrony.conf.opensuse-microos.tmpl b/templates/chrony.conf.opensuse-microos.tmpl
new file mode 100644
index 00000000..a3d3e0ec
--- /dev/null
+++ b/templates/chrony.conf.opensuse-microos.tmpl
@@ -0,0 +1,38 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# In the first three updates, step the system clock instead of slewing it
+# if the adjustment is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/chrony.conf.opensuse-tumbleweed.tmpl b/templates/chrony.conf.opensuse-tumbleweed.tmpl
new file mode 100644
index 00000000..a3d3e0ec
--- /dev/null
+++ b/templates/chrony.conf.opensuse-tumbleweed.tmpl
@@ -0,0 +1,38 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# In the first three updates, step the system clock instead of slewing it
+# if the adjustment is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/chrony.conf.sle-micro.tmpl b/templates/chrony.conf.sle-micro.tmpl
new file mode 100644
index 00000000..a3d3e0ec
--- /dev/null
+++ b/templates/chrony.conf.sle-micro.tmpl
@@ -0,0 +1,38 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# In the first three updates, step the system clock instead of slewing it
+# if the adjustment is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/templates/chrony.conf.sle_hpc.tmpl b/templates/chrony.conf.sle_hpc.tmpl
new file mode 100644
index 00000000..a3d3e0ec
--- /dev/null
+++ b/templates/chrony.conf.sle_hpc.tmpl
@@ -0,0 +1,38 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# In the first three updates, step the system clock instead of slewing it
+# if the adjustment is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
diff --git a/tests/data/netinfo/freebsd-duplicate-macs-ifconfig-output b/tests/data/netinfo/freebsd-duplicate-macs-ifconfig-output
new file mode 100644
index 00000000..769fe6df
--- /dev/null
+++ b/tests/data/netinfo/freebsd-duplicate-macs-ifconfig-output
@@ -0,0 +1,13 @@
+hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
+ options=8051b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,TSO4,LRO,LINKSTATE>
+ ether 00:0d:3a:54:ad:1e
+ inet 10.0.0.35 netmask 0xffffff00 broadcast 10.0.0.255
+ media: Ethernet 100GBase-CR4 <full-duplex,rxpause,txpause>
+ status: active
+ nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
+mce0: flags=8a43<UP,BROADCAST,RUNNING,ALLMULTI,SIMPLEX,MULTICAST> metric 0 mtu 1500
+ options=8805bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,LRO,LINKSTATE>
+ ether 00:0d:3a:54:ad:1e
+ media: Ethernet 100GBase-CR4 <full-duplex,rxpause,txpause>
+ status: active
+      nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
\ No newline at end of file
diff --git a/tests/data/netinfo/freebsd-ifconfig-output b/tests/data/netinfo/freebsd-ifconfig-output
index 3ca0d2b2..05a69b88 100644
--- a/tests/data/netinfo/freebsd-ifconfig-output
+++ b/tests/data/netinfo/freebsd-ifconfig-output
@@ -38,4 +38,4 @@ lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384
inet6 fe80::1%lo0 prefixlen 64 scopeid 0x2
inet 127.0.0.1 netmask 0xff000000
groups: lo
- nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
+        nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
\ No newline at end of file
diff --git a/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg b/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg
new file mode 100644
index 00000000..70a9d313
--- /dev/null
+++ b/tests/data/vmware/cust-dhcp-2nic-instance-id.cfg
@@ -0,0 +1,37 @@
+[NETWORK]
+NETWORKING = yes
+BOOTPROTO = dhcp
+HOSTNAME = myhost1
+DOMAINNAME = eng.vmware.com
+
+[NIC-CONFIG]
+NICS = NIC1,NIC2
+
+[NIC1]
+MACADDR = 00:50:56:a6:8c:08
+ONBOOT = yes
+IPv4_MODE = BACKWARDS_COMPATIBLE
+BOOTPROTO = dhcp
+
+[NIC2]
+MACADDR = 00:50:56:a6:5a:de
+ONBOOT = yes
+IPv4_MODE = BACKWARDS_COMPATIBLE
+BOOTPROTO = dhcp
+
+# some random comment
+
+[PASSWORD]
+# secret
+-PASS = c2VjcmV0Cg==
+
+[DNS]
+DNSFROMDHCP=yes
+SUFFIX|1 = eng.vmware.com
+
+[DATETIME]
+TIMEZONE = Africa/Abidjan
+UTC = yes
+
+[CLOUDINIT]
+INSTANCE-ID = guest-os-customization-uuid
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index c4dd4eec..308ffedd 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -13,6 +13,7 @@ from uuid import UUID
from pycloudlib import (
EC2,
GCE,
+ IBM,
OCI,
Azure,
LXDContainer,
@@ -408,3 +409,14 @@ class OpenstackCloud(IntegrationCloud):
"OS image id: {}".format(image.image_id)
) from e
return image.image_id
+
+
+class IbmCloud(IntegrationCloud):
+ datasource = "ibm"
+ cloud_instance: IBM
+
+ def _get_cloud_instance(self) -> IBM:
+ # Note: IBM image names starting with `ibm` are reserved.
+ return IBM(
+ tag="integration-test-ibm",
+ )
diff --git a/tests/integration_tests/cmd/test_schema.py b/tests/integration_tests/cmd/test_schema.py
index 0d92f146..0930309b 100644
--- a/tests/integration_tests/cmd/test_schema.py
+++ b/tests/integration_tests/cmd/test_schema.py
@@ -20,9 +20,9 @@ class TestSchemaDeprecations:
log = class_client.read_from_file("/var/log/cloud-init.log")
verify_clean_log(log, ignore_deprecations=True)
assert "WARNING]: Deprecated cloud-config provided:" in log
- assert "apt_reboot_if_required: DEPRECATED" in log
- assert "apt_update: DEPRECATED" in log
- assert "apt_upgrade: DEPRECATED" in log
+ assert "apt_reboot_if_required: Default: ``false``. Deprecated " in log
+ assert "apt_update: Default: ``false``. Deprecated in version" in log
+ assert "apt_upgrade: Default: ``false``. Deprecated in version" in log
def test_schema_deprecations(self, class_client: IntegrationInstance):
"""Test schema behavior with deprecated configs."""
@@ -37,9 +37,18 @@ class TestSchemaDeprecations:
), "`schema` cmd must return 0 even with deprecated configs"
assert not result.stderr
assert "Cloud config schema deprecations:" in result.stdout
- assert "apt_update: DEPRECATED" in result.stdout
- assert "apt_upgrade: DEPRECATED" in result.stdout
- assert "apt_reboot_if_required: DEPRECATED" in result.stdout
+ assert (
+ "apt_update: Default: ``false``. Deprecated in version"
+ in result.stdout
+ )
+ assert (
+ "apt_upgrade: Default: ``false``. Deprecated in version"
+ in result.stdout
+ )
+ assert (
+ "apt_reboot_if_required: Default: ``false``. Deprecated in version"
+ in result.stdout
+ )
annotated_result = class_client.execute(
f"cloud-init schema --annotate --config-file {user_data_fn}"
@@ -56,9 +65,9 @@ class TestSchemaDeprecations:
apt_reboot_if_required: false\t\t# D3
# Deprecations: -------------
- # D1: DEPRECATED: Dropped after April 2027. Use ``package_update``. Default: ``false``
- # D2: DEPRECATED: Dropped after April 2027. Use ``package_upgrade``. Default: ``false``
- # D3: DEPRECATED: Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``
+ # D1: Default: ``false``. Deprecated in version 22.2. Use ``package_update`` instead.
+ # D2: Default: ``false``. Deprecated in version 22.2. Use ``package_upgrade`` instead.
+ # D3: Default: ``false``. Deprecated in version 22.2. Use ``package_reboot_if_required`` instead.
Valid cloud-config: /root/user-data""" # noqa: E501
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 6157bad8..782ca7e5 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -17,6 +17,7 @@ from tests.integration_tests.clouds import (
AzureCloud,
Ec2Cloud,
GceCloud,
+ IbmCloud,
ImageSpecification,
IntegrationCloud,
LxdContainerCloud,
@@ -39,6 +40,7 @@ platforms: Dict[str, Type[IntegrationCloud]] = {
"gce": GceCloud,
"azure": AzureCloud,
"oci": OciCloud,
+ "ibm": IbmCloud,
"lxd_container": LxdContainerCloud,
"lxd_vm": LxdVmCloud,
"openstack": OpenstackCloud,
diff --git a/tests/integration_tests/datasources/test_lxd_hotplug.py b/tests/integration_tests/datasources/test_lxd_hotplug.py
index 8c403e04..81cff252 100644
--- a/tests/integration_tests/datasources/test_lxd_hotplug.py
+++ b/tests/integration_tests/datasources/test_lxd_hotplug.py
@@ -4,10 +4,14 @@ import pytest
from cloudinit import safeyaml
from cloudinit.subp import subp
+from cloudinit.util import is_true
from tests.integration_tests.clouds import ImageSpecification
from tests.integration_tests.decorators import retry
from tests.integration_tests.instances import IntegrationInstance
-from tests.integration_tests.util import lxd_has_nocloud
+from tests.integration_tests.util import (
+ get_feature_flag_value,
+ lxd_has_nocloud,
+)
USER_DATA = """\
#cloud-config
@@ -116,6 +120,7 @@ class TestLxdHotplug:
top_key = "user"
else:
top_key = "cloud-init"
+
assert subp(
[
"lxc",
@@ -148,6 +153,17 @@ class TestLxdHotplug:
assert post_netplan == expected_netplan, client.read_from_file(
"/var/log/cloud-init.log"
)
+ file_perms = class_client.execute(
+ "stat -c %a /etc/netplan/50-cloud-init.yaml"
+ )
+ assert file_perms.ok, "Unable to check perms on 50-cloud-init.yaml"
+ feature_netplan_root_only = is_true(
+ get_feature_flag_value(
+ class_client, "NETPLAN_CONFIG_ROOT_READ_ONLY"
+ )
+ )
+ config_perms = "600" if feature_netplan_root_only else "644"
+ assert config_perms == file_perms.stdout.strip()
ip_info = json.loads(client.execute("ip --json address"))
eth2s = [i for i in ip_info if i["ifname"] == "eth2"]
assert len(eth2s) == 1
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index abc70fe4..c4f28fcb 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -21,6 +21,7 @@ RUN_UNSTABLE = False
# azure
# ec2
# gce
+# ibm
# oci
# openstack
PLATFORM = "lxd_container"
diff --git a/tests/integration_tests/modules/test_ansible.py b/tests/integration_tests/modules/test_ansible.py
index d781dabf..3d0f96cb 100644
--- a/tests/integration_tests/modules/test_ansible.py
+++ b/tests/integration_tests/modules/test_ansible.py
@@ -12,8 +12,8 @@ REPO_D = "/root/playbooks"
USER_DATA = """\
#cloud-config
version: v1
-packages_update: true
-packages_upgrade: true
+package_update: true
+package_upgrade: true
packages:
- git
- python3-pip
@@ -114,8 +114,8 @@ ANSIBLE_CONTROL = """\
# This example installs a playbook repository from a remote private repository
# and then runs two of the plays.
-packages_update: true
-packages_upgrade: true
+package_update: true
+package_upgrade: true
packages:
- git
- python3-pip
@@ -301,6 +301,9 @@ def test_ansible_pull_distro(client):
@pytest.mark.user_data(ANSIBLE_CONTROL)
@pytest.mark.lxd_vm
+# Not run on bionic because the test installs via pip, and newer pip releases
+# are dropping support for the Python version shipped in bionic
+@pytest.mark.not_bionic
def test_ansible_controller(client):
log = client.read_from_file("/var/log/cloud-init.log")
verify_clean_log(log)
diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py
index 8d18fb76..2baedda9 100644
--- a/tests/integration_tests/modules/test_ca_certs.py
+++ b/tests/integration_tests/modules/test_ca_certs.py
@@ -76,10 +76,10 @@ class TestCaCerts:
unlinked_files.append(filename)
assert ["ca-certificates.crt"] == unlinked_files
- assert "cloud-init-ca-certs.pem" == links["a535c1f3.0"]
+ assert "cloud-init-ca-cert-1.pem" == links["a535c1f3.0"]
assert (
- "/usr/share/ca-certificates/cloud-init-ca-certs.crt"
- == links["cloud-init-ca-certs.pem"]
+ "/usr/local/share/ca-certificates/cloud-init-ca-cert-1.crt"
+ == links["cloud-init-ca-cert-1.pem"]
)
def test_cert_installed(self, class_client: IntegrationInstance):
diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py
index 4b8f53a8..30f56ad7 100644
--- a/tests/integration_tests/modules/test_cli.py
+++ b/tests/integration_tests/modules/test_cli.py
@@ -41,7 +41,7 @@ def test_valid_userdata(client: IntegrationInstance):
"""
result = client.execute("cloud-init schema --system")
assert result.ok
- assert "Valid cloud-config: system userdata" == result.stdout.strip()
+ assert "Valid cloud-config: user-data" in result.stdout.strip()
result = client.execute("cloud-init status --long")
if not result.ok:
raise AssertionError(
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 32537729..647e8728 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -15,10 +15,12 @@ from pathlib import Path
import pytest
import cloudinit.config
+from cloudinit.util import is_true
from tests.integration_tests.clouds import ImageSpecification
from tests.integration_tests.decorators import retry
from tests.integration_tests.instances import IntegrationInstance
from tests.integration_tests.util import (
+ get_feature_flag_value,
get_inactive_modules,
lxd_has_nocloud,
verify_clean_log,
@@ -77,6 +79,23 @@ timezone: US/Aleutian
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestCombined:
+ @pytest.mark.ubuntu # Because netplan
+ def test_netplan_permissions(self, class_client: IntegrationInstance):
+ """
+ Test that netplan config file is generated with proper permissions
+ """
+ file_perms = class_client.execute(
+ "stat -c %a /etc/netplan/50-cloud-init.yaml"
+ )
+ assert file_perms.ok, "Unable to check perms on 50-cloud-init.yaml"
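+        # expected mode depends on the NETPLAN_CONFIG_ROOT_READ_ONLY flag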
+ feature_netplan_root_only = is_true(
+ get_feature_flag_value(
+ class_client, "NETPLAN_CONFIG_ROOT_READ_ONLY"
+ )
+ )
+ config_perms = "600" if feature_netplan_root_only else "644"
+ assert config_perms == file_perms.stdout.strip()
+
def test_final_message(self, class_client: IntegrationInstance):
"""Test that final_message module works as expected.
@@ -154,8 +173,8 @@ class TestCombined:
def test_snap(self, class_client: IntegrationInstance):
"""Integration test for the snap module.
- This test specifies a command to be executed by the ``snap`` module
- and then checks that if that command was executed during boot.
+    This test verifies that the snap packages specified in the user-data
+    were installed by the ``snap`` module during boot.
"""
client = class_client
snap_output = client.execute("snap list")
@@ -405,6 +424,23 @@ class TestCombined:
assert v1_data["instance_id"] == client.instance.instance_id
assert v1_data["local_hostname"] == client.instance.name
+ @pytest.mark.lxd_container
+ @pytest.mark.azure
+ @pytest.mark.gce
+ @pytest.mark.ec2
+ def test_instance_cloud_id_across_reboot(
+ self, class_client: IntegrationInstance
+ ):
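+        # cloud-id marker files should exist both before and after reboot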
+ client = class_client
+ platform = client.settings.PLATFORM
+ cloud_id_alias = {"ec2": "aws", "lxd_container": "lxd"}
+ cloud_file = f"cloud-id-{cloud_id_alias.get(platform, platform)}"
+ assert client.execute(f"test -f /run/cloud-init/{cloud_file}").ok
+ assert client.execute("test -f /run/cloud-init/cloud-id").ok
+ client.restart()
+ assert client.execute(f"test -f /run/cloud-init/{cloud_file}").ok
+ assert client.execute("test -f /run/cloud-init/cloud-id").ok
+
@pytest.mark.user_data(USER_DATA)
class TestCombinedNoCI:
diff --git a/tests/integration_tests/modules/test_lxd.py b/tests/integration_tests/modules/test_lxd.py
index 55d82a54..f84cdff6 100644
--- a/tests/integration_tests/modules/test_lxd.py
+++ b/tests/integration_tests/modules/test_lxd.py
@@ -46,7 +46,6 @@ lxd:
ipv4.address: auto
ipv6.address: auto
description: ""
- managed: false
name: lxdbr0
type: ""
storage_pools:
@@ -220,6 +219,10 @@ def validate_preseed_projects(client: IntegrationInstance, preseed_cfg):
# https://discuss.linuxcontainers.org/t/lxd-5-5-has-been-released/14899
if "features.storage.buckets" in project["config"]:
assert "true" == project["config"].pop("features.storage.buckets")
+ # `features.networks.zones` was introduced in lxd 5.9. More info:
+ # https://discuss.linuxcontainers.org/t/lxd-5-9-has-been-released/
+ if "features.networks.zones" in project["config"]:
+ assert "true" == project["config"].pop("features.networks.zones")
assert project == src_project
diff --git a/tests/integration_tests/modules/test_puppet.py b/tests/integration_tests/modules/test_puppet.py
index 1bd9cee4..b8613866 100644
--- a/tests/integration_tests/modules/test_puppet.py
+++ b/tests/integration_tests/modules/test_puppet.py
@@ -17,7 +17,12 @@ def test_puppet_service(client: IntegrationInstance):
"""Basic test that puppet gets installed and runs."""
log = client.read_from_file("/var/log/cloud-init.log")
verify_clean_log(log)
- assert client.execute("systemctl is-active puppet").ok
+ puppet_ok = client.execute("systemctl is-active puppet.service").ok
+ puppet_agent_ok = client.execute(
+ "systemctl is-active puppet-agent.service"
+ ).ok
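+    # exactly one of puppet.service / puppet-agent.service should be active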
+ assert True in [puppet_ok, puppet_agent_ok]
+ assert False in [puppet_ok, puppet_agent_ok]
assert "Running command ['puppet', 'agent'" not in log
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
index 4e0ee122..765dd30c 100644
--- a/tests/integration_tests/modules/test_set_password.py
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -191,6 +191,15 @@ class Mixin:
# We look for the exact line match, to avoid a commented line matching
assert "PasswordAuthentication yes" in sshd_config.splitlines()
+ @pytest.mark.ubuntu
+ def test_check_ssh_service(self, class_client):
+ """Ensure we check the sshd status because we modified the config"""
+ log = class_client.read_from_file("/var/log/cloud-init.log")
+ assert (
+ "'systemctl', 'show', '--property', 'ActiveState', "
+ "'--value', 'ssh'" in log
+ )
+
def test_sshd_config(self, class_client):
"""Test that SSH password auth is enabled."""
sshd_config = class_client.execute("sshd -T").stdout
diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py
index 8e73267a..b6069376 100644
--- a/tests/integration_tests/modules/test_ssh_keys_provided.py
+++ b/tests/integration_tests/modules/test_ssh_keys_provided.py
@@ -70,6 +70,7 @@ ssh_keys:
1M6G15dqjQ2XkNVOEnb5AAAAD3Jvb3RAeGVuaWFsLWx4ZAECAwQFBg==
-----END OPENSSH PRIVATE KEY-----
ed25519_public: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6G15dqjQ2XkNVOEnb5 root@xenial-lxd
+ ed25519_certificate: ssh-ed25519-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIAGbMtat76PmaoqQ7B2lDvhnzE47psvMvmnPhz6f423ZAAAAINudAZSu4vjZpVWzId5pXmZg1M6G15dqjQ2XkNVOEnb5AAAAAAAAAAAAAAACAAAAA2x4ZAAAAAAAAAAAY+0LHAAAAABlzO1rAAAAAAAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAtPx6PqN3iSEsnTtibyIEy52Tra8T5fn0ryXyg46Di2NBwdnjo8trNv9jenfV/UhmePl58lXjT43wV8OCMl6KsYXyBdegM35NNtono4I4mLLKFMR99TOtDn6iYcaNenVhF3ZCj9Z2nNOlTrdc0uchHqKMrxLjCRCUrL91Uf+xioTF901YRM+ZqC5lT92yAL76F4qPF+Lq1QtUfNfUIwwvOp5ccDZLPxij0YvyBzubYye9hJHuyjbJv78R4JHV+L2WhzSoX3W/6WrxVzeXqFGqH894ccOaC/7tnqSP6V8lIQ6fE2+cDurJcpM3CJRgkndGHjtU55Y71YkcdLksSMvezQAAARQAAAAMcnNhLXNoYTItNTEyAAABAC8VDdaBkdt9jRW2Wh7A54rtbWyoafEtA8rud9UHgq3fSLFvWMBBe19/MJZXs+xWkdvSuG49ZeaEWi7ZO3SQaUbmXp2L5CH6TNnok3yo5QL2h01gP6+ydn98cA8lktvZt/+ihSqXpeSAg6S755W0zqlaeT5iyopSmNt4/wLh8FvgXR+TrAEe2EEXcPcLEXrBrPkjoLZ8j/pzLFJHHmlme/JcHPGMB7ksGG9nKr6ZViB3VPshdxP4iqpORv4Ro+UBUaS1AoHe0mZsccr7gKg7Xe6lhqHT2Fwlkk9B1zsWWUTjWU4TeG9FrJCjSAGCHLdHUszhCOsQHOOf9aR2095mbI8= root@xenial-lxd
ecdsa_private: |
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIDuK+QFc1wmyJY8uDqQVa1qHte30Rk/fdLxGIBkwJAyOoAoGCCqGSM49
@@ -137,13 +138,15 @@ class TestSshKeysProvided:
out = class_client.read_from_file(config_path).strip()
assert expected_out in out
- @pytest.mark.parametrize(
- "expected_out", ("HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub")
- )
- def test_sshd_config(self, expected_out, class_client):
+ def test_sshd_config(self, class_client):
+ expected_certs = (
+ "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub",
+ "HostCertificate /etc/ssh/ssh_host_ed25519_key-cert.pub",
+ )
if ImageSpecification.from_os_image().release in {"bionic"}:
sshd_config_path = "/etc/ssh/sshd_config"
else:
sshd_config_path = "/etc/ssh/sshd_config.d/50-cloud-init.conf"
sshd_config = class_client.read_from_file(sshd_config_path).strip()
- assert expected_out in sshd_config
+ for expected_cert in expected_certs:
+ assert expected_cert in sshd_config
diff --git a/tests/integration_tests/modules/test_write_files.py b/tests/integration_tests/modules/test_write_files.py
index a713b9c5..e6bb8625 100644
--- a/tests/integration_tests/modules/test_write_files.py
+++ b/tests/integration_tests/modules/test_write_files.py
@@ -51,6 +51,13 @@ write_files:
owner: 'myuser'
permissions: '0644'
append: true
+- path: '/home/testuser/subdir1/subdir2/my-file'
+ content: |
+ echo 'hello world!'
+ defer: true
+ owner: 'myuser'
+ permissions: '0644'
+ append: true
""".format(
B64_CONTENT.decode("ascii")
)
@@ -97,3 +104,25 @@ class TestWriteFiles:
class_client.restart()
out = class_client.read_from_file("/home/testuser/my-file")
assert "echo 'hello world!'" == out
+
+ def test_write_files_deferred_with_newly_created_dir(self, class_client):
+ """Test that newly created directory works as expected.
+
+ Users get created after write_files module runs, so ensure that
+ with `defer: true`, the file and directories gets written with correct
+ ownership.
+ """
+ out = class_client.read_from_file(
+ "/home/testuser/subdir1/subdir2/my-file"
+ )
+ assert "echo 'hello world!'" == out
+ assert (
+ class_client.execute(
+ 'stat -c "%U %a" /home/testuser/subdir1/subdir2'
+ )
+ == "myuser 755"
+ )
+ assert (
+ class_client.execute('stat -c "%U %a" /home/testuser/subdir1')
+ == "myuser 755"
+ )
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index 69214e9f..1c2a9284 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -172,3 +172,13 @@ def lxd_has_nocloud(client: IntegrationInstance) -> bool:
["lxc", "config", "metadata", "show", client.instance.name]
)
return "/var/lib/cloud/seed/nocloud" in lxd_image_metadata.stdout
+
+
+def get_feature_flag_value(client: IntegrationInstance, key):
+ value = client.execute(
+ 'python3 -c "from cloudinit import features; '
+ f'print(features.{key})"'
+ ).strip()
+ if "NameError" in value:
+ raise NameError(f"name '{key}' is not defined")
+ return value
diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py
index 232cc731..c5385b79 100644
--- a/tests/unittests/cmd/test_clean.py
+++ b/tests/unittests/cmd/test_clean.py
@@ -238,12 +238,22 @@ class TestClean:
assert 0 == retcode
assert [(["shutdown", "-r", "now"], False)] == called_cmds
- @pytest.mark.parametrize("machine_id", (True, False))
+ @pytest.mark.parametrize(
+ "machine_id,systemd_val",
+ (
+ pytest.param(True, True, id="machine_id_on_systemd_uninitialized"),
+ pytest.param(
+ True, False, id="machine_id_non_systemd_removes_file"
+ ),
+ pytest.param(False, False, id="no_machine_id_param_file_remains"),
+ ),
+ )
+ @mock.patch("cloudinit.cmd.clean.uses_systemd")
def test_handle_clean_args_removed_machine_id(
- self, machine_id, clean_paths, init_class
+ self, uses_systemd, machine_id, systemd_val, clean_paths, init_class
):
"""handle_clean_args removes /etc/machine-id when arg is True."""
-
+ uses_systemd.return_value = systemd_val
myargs = namedtuple(
"MyArgs", "remove_logs remove_seed reboot machine_id"
)
@@ -271,7 +281,13 @@ class TestClean:
args=cmdargs,
)
assert 0 == retcode
- assert machine_id_path.exists() is bool(not machine_id)
+ if systemd_val:
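+            # on systemd, clean overwrites machine-id rather than removing it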
+ if machine_id:
+ assert "uninitialized\n" == machine_id_path.read()
+ else:
+ assert "SOME-AMAZN-MACHINE-ID" == machine_id_path.read()
+ else:
+ assert machine_id_path.exists() is bool(not machine_id)
def test_status_main(self, clean_paths, init_class):
"""clean.main can be run as a standalone script."""
diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py
index 80600555..bf87269a 100644
--- a/tests/unittests/cmd/test_cloud_id.py
+++ b/tests/unittests/cmd/test_cloud_id.py
@@ -45,6 +45,16 @@ STATUS_DETAILS_RUNNING = status.StatusDetails(
)
+STATUS_DETAILS_RUNNING_DS_NONE = status.StatusDetails(
+ status.UXAppStatus.RUNNING,
+ status.UXAppBootStatusCode.UNKNOWN,
+ "",
+ [],
+ "",
+ None,
+)
+
+
@pytest.fixture(autouse=True)
def setup_mocks(mocker):
mocker.patch(
@@ -203,6 +213,7 @@ class TestCloudId:
(STATUS_DETAILS_DISABLED, 2),
(STATUS_DETAILS_NOT_RUN, 3),
(STATUS_DETAILS_RUNNING, 0),
+ (STATUS_DETAILS_RUNNING_DS_NONE, 0),
),
)
@mock.patch(M_PATH + "get_status_details")
diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py
index 6ae3b398..52a02c35 100644
--- a/tests/unittests/cmd/test_status.py
+++ b/tests/unittests/cmd/test_status.py
@@ -38,6 +38,40 @@ def config(tmpdir):
class TestStatus:
maxDiff = None
+ @mock.patch(
+ M_PATH + "load_file",
+ return_value=(
+ '{"v1": {"datasource": null, "init": {"errors": [], "finished": '
+ 'null, "start": null}, "init-local": {"errors": [], "finished": '
+ 'null, "start": 1669231096.9621563}, "modules-config": '
+ '{"errors": [], "finished": null, "start": null},'
+ '"modules-final": {"errors": [], "finished": null, '
+ '"start": null}, "modules-init": {"errors": [], "finished": '
+ 'null, "start": null}, "stage": "init-local"} }'
+ ),
+ )
+ @mock.patch(M_PATH + "os.path.exists", return_value=True)
+ @mock.patch(
+ M_PATH + "get_bootstatus",
+ return_value=(
+ status.UXAppBootStatusCode.ENABLED_BY_GENERATOR,
+ "Cloud-init enabled by systemd cloud-init-generator",
+ ),
+ )
+ def test_get_status_details_ds_none(
+ self, m_get_boot_status, m_p_exists, m_load_json, tmpdir
+ ):
+ paths = mock.Mock()
+ paths.run_dir = str(tmpdir)
+ assert status.StatusDetails(
+ status.UXAppStatus.RUNNING,
+ status.UXAppBootStatusCode.ENABLED_BY_GENERATOR,
+ "Running in stage: init-local",
+ [],
+ "Wed, 23 Nov 2022 19:18:16 +0000",
+ None, # datasource
+ ) == status.get_status_details(paths)
+
@pytest.mark.parametrize(
[
"ensured_file",
diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py
index 5bb87385..8d7ba5dc 100644
--- a/tests/unittests/config/test_apt_source_v3.py
+++ b/tests/unittests/config/test_apt_source_v3.py
@@ -14,6 +14,8 @@ import tempfile
from unittest import TestCase, mock
from unittest.mock import call
+import pytest
+
from cloudinit import gpg, subp, util
from cloudinit.config import cc_apt_configure
from tests.unittests import helpers as t_help
@@ -955,6 +957,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.assertEqual(mirrors["PRIMARY"], pmir)
self.assertEqual(mirrors["SECURITY"], smir)
+ @pytest.mark.allow_dns_lookup
def test_apt_v3_url_resolvable(self):
"""test_apt_v3_url_resolvable - Test resolving urls"""
diff --git a/tests/unittests/config/test_cc_ansible.py b/tests/unittests/config/test_cc_ansible.py
index a0d6bcab..bd8ec9bf 100644
--- a/tests/unittests/config/test_cc_ansible.py
+++ b/tests/unittests/config/test_cc_ansible.py
@@ -18,7 +18,7 @@ from tests.unittests.helpers import skipUnlessJsonSchema
from tests.unittests.util import get_cloud
try:
- import pip as _pip # type: ignore # noqa: F401
+ import pip as _pip # noqa: F401
HAS_PIP = True
except ImportError:
diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py
index a0b402ac..19e5d422 100644
--- a/tests/unittests/config/test_cc_ca_certs.py
+++ b/tests/unittests/config/test_cc_ca_certs.py
@@ -74,15 +74,19 @@ class TestConfig(TestCase):
mock.patch.object(cc_ca_certs, "update_ca_certs")
)
self.mock_remove = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, "remove_default_ca_certs")
+ mock.patch.object(cc_ca_certs, "disable_default_ca_certs")
)
- def test_no_trusted_list(self):
+ @mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=("", None),
+ )
+ def test_no_trusted_list(self, _):
"""
Test that no certificates are written if the 'trusted' key is not
present.
"""
- config = {"ca-certs": {}}
+ config = {"ca_certs": {}}
for distro_name in cc_ca_certs.distros:
self._mock_init()
@@ -93,9 +97,13 @@ class TestConfig(TestCase):
self.assertEqual(self.mock_update.call_count, 1)
self.assertEqual(self.mock_remove.call_count, 0)
- def test_empty_trusted_list(self):
+ @mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=("", None),
+ )
+ def test_empty_trusted_list(self, _):
"""Test that no certificate are written if 'trusted' list is empty."""
- config = {"ca-certs": {"trusted": []}}
+ config = {"ca_certs": {"trusted": []}}
for distro_name in cc_ca_certs.distros:
self._mock_init()
@@ -106,9 +114,13 @@ class TestConfig(TestCase):
self.assertEqual(self.mock_update.call_count, 1)
self.assertEqual(self.mock_remove.call_count, 0)
- def test_single_trusted(self):
+ @mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=("", None),
+ )
+ def test_single_trusted(self, _):
"""Test that a single cert gets passed to add_ca_certs."""
- config = {"ca-certs": {"trusted": ["CERT1"]}}
+ config = {"ca_certs": {"trusted": ["CERT1"]}}
for distro_name in cc_ca_certs.distros:
self._mock_init()
@@ -120,9 +132,13 @@ class TestConfig(TestCase):
self.assertEqual(self.mock_update.call_count, 1)
self.assertEqual(self.mock_remove.call_count, 0)
- def test_multiple_trusted(self):
+ @mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=("", None),
+ )
+ def test_multiple_trusted(self, _):
"""Test that multiple certs get passed to add_ca_certs."""
- config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
+ config = {"ca_certs": {"trusted": ["CERT1", "CERT2"]}}
for distro_name in cc_ca_certs.distros:
self._mock_init()
@@ -134,7 +150,11 @@ class TestConfig(TestCase):
self.assertEqual(self.mock_update.call_count, 1)
self.assertEqual(self.mock_remove.call_count, 0)
- def test_remove_default_ca_certs(self):
+ @mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=("", None),
+ )
+ def test_remove_default_ca_certs(self, _):
"""Test remove_defaults works as expected."""
config = {"ca_certs": {"remove_defaults": True}}
@@ -147,7 +167,11 @@ class TestConfig(TestCase):
self.assertEqual(self.mock_update.call_count, 1)
self.assertEqual(self.mock_remove.call_count, 1)
- def test_no_remove_defaults_if_false(self):
+ @mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=("", None),
+ )
+ def test_no_remove_defaults_if_false(self, _):
"""Test remove_defaults is not called when config value is False."""
config = {"ca_certs": {"remove_defaults": False}}
@@ -160,8 +184,14 @@ class TestConfig(TestCase):
self.assertEqual(self.mock_update.call_count, 1)
self.assertEqual(self.mock_remove.call_count, 0)
- def test_correct_order_for_remove_then_add(self):
- """Test remove_defaults is not called when config value is False."""
+ @mock.patch(
+ "cloudinit.distros.networking.subp.subp",
+ return_value=("", None),
+ )
+ def test_correct_order_for_remove_then_add(self, _):
+ """
+ Test remove_defaults is called before add.
+ """
config = {"ca_certs": {"remove_defaults": True, "trusted": ["CERT1"]}}
for distro_name in cc_ca_certs.distros:
@@ -170,9 +200,9 @@ class TestConfig(TestCase):
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+ self.assertEqual(self.mock_remove.call_count, 1)
self.mock_add.assert_called_once_with(conf, ["CERT1"])
self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
class TestAddCaCerts(TestCase):
@@ -200,51 +230,10 @@ class TestAddCaCerts(TestCase):
cc_ca_certs.add_ca_certs(conf, [])
self.assertEqual(mockobj.call_count, 0)
- def test_single_cert_trailing_cr(self):
- """Test adding a single certificate to the trusted CAs
- when existing ca-certificates has trailing newline"""
- cert = "CERT1\nLINE2\nLINE3"
-
- ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
- expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
-
- self.m_stat.return_value.st_size = 1
-
- for distro_name in cc_ca_certs.distros:
- conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, "write_file")
- )
- mock_load = mocks.enter_context(
- mock.patch.object(
- util, "load_file", return_value=ca_certs_content
- )
- )
-
- cc_ca_certs.add_ca_certs(conf, [cert])
-
- mock_write.assert_has_calls(
- [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
- )
- if conf["ca_cert_config"] is not None:
- mock_write.assert_has_calls(
- [
- mock.call(
- conf["ca_cert_config"], expected, omode="wb"
- )
- ]
- )
- mock_load.assert_called_once_with(conf["ca_cert_config"])
-
- def test_single_cert_no_trailing_cr(self):
- """Test adding a single certificate to the trusted CAs
- when existing ca-certificates has no trailing newline"""
+ def test_single_cert(self):
+ """Test adding a single certificate to the trusted CAs."""
cert = "CERT1\nLINE2\nLINE3"
- ca_certs_content = "line1\nline2\nline3"
-
self.m_stat.return_value.st_size = 1
for distro_name in cc_ca_certs.distros:
@@ -254,65 +243,24 @@ class TestAddCaCerts(TestCase):
mock_write = mocks.enter_context(
mock.patch.object(util, "write_file")
)
- mock_load = mocks.enter_context(
- mock.patch.object(
- util, "load_file", return_value=ca_certs_content
- )
- )
cc_ca_certs.add_ca_certs(conf, [cert])
mock_write.assert_has_calls(
- [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
- )
- if conf["ca_cert_config"] is not None:
- mock_write.assert_has_calls(
- [
- mock.call(
- conf["ca_cert_config"],
- "%s\n%s\n"
- % (ca_certs_content, conf["ca_cert_filename"]),
- omode="wb",
- )
- ]
- )
-
- mock_load.assert_called_once_with(conf["ca_cert_config"])
-
- def test_single_cert_to_empty_existing_ca_file(self):
- """Test adding a single certificate to the trusted CAs
- when existing ca-certificates.conf is empty"""
- cert = "CERT1\nLINE2\nLINE3"
-
- expected = "cloud-init-ca-certs.crt\n"
-
- self.m_stat.return_value.st_size = 0
-
- for distro_name in cc_ca_certs.distros:
- conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
- with mock.patch.object(
- util, "write_file", autospec=True
- ) as m_write:
-
- cc_ca_certs.add_ca_certs(conf, [cert])
-
- m_write.assert_has_calls(
- [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
+ [
+ mock.call(
+ conf["ca_cert_full_path"].format(cert_index=1),
+ cert,
+ mode=0o644,
+ )
+ ]
)
- if conf["ca_cert_config"] is not None:
- m_write.assert_has_calls(
- [
- mock.call(
- conf["ca_cert_config"], expected, omode="wb"
- )
- ]
- )
def test_multiple_certs(self):
"""Test adding multiple certificates to the trusted CAs."""
certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
- expected_cert_file = "\n".join(certs)
- ca_certs_content = "line1\nline2\nline3"
+ expected_cert_1_file = certs[0]
+ expected_cert_2_file = certs[1]
self.m_stat.return_value.st_size = 1
@@ -323,36 +271,23 @@ class TestAddCaCerts(TestCase):
mock_write = mocks.enter_context(
mock.patch.object(util, "write_file")
)
- mock_load = mocks.enter_context(
- mock.patch.object(
- util, "load_file", return_value=ca_certs_content
- )
- )
cc_ca_certs.add_ca_certs(conf, certs)
mock_write.assert_has_calls(
[
mock.call(
- conf["ca_cert_full_path"],
- expected_cert_file,
+ conf["ca_cert_full_path"].format(cert_index=1),
+ expected_cert_1_file,
mode=0o644,
- )
+ ),
+ mock.call(
+ conf["ca_cert_full_path"].format(cert_index=2),
+ expected_cert_2_file,
+ mode=0o644,
+ ),
]
)
- if conf["ca_cert_config"] is not None:
- mock_write.assert_has_calls(
- [
- mock.call(
- conf["ca_cert_config"],
- "%s\n%s\n"
- % (ca_certs_content, conf["ca_cert_filename"]),
- omode="wb",
- )
- ]
- )
-
- mock_load.assert_called_once_with(conf["ca_cert_config"])
class TestUpdateCaCerts(unittest.TestCase):
@@ -378,6 +313,12 @@ class TestRemoveDefaultCaCerts(TestCase):
)
def test_commands(self):
+ ca_certs_content = "# line1\nline2\nline3\n"
+ expected = (
+ "# line1\n# Modified by cloud-init to deselect certs due to"
+ " user-data\n!line2\n!line3\n"
+ )
+
for distro_name in cc_ca_certs.distros:
conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
@@ -385,33 +326,42 @@ class TestRemoveDefaultCaCerts(TestCase):
mock_delete = mocks.enter_context(
mock.patch.object(util, "delete_dir_contents")
)
- mock_write = mocks.enter_context(
- mock.patch.object(util, "write_file")
+ mock_load = mocks.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=ca_certs_content
+ )
)
mock_subp = mocks.enter_context(
mock.patch.object(subp, "subp")
)
-
- cc_ca_certs.remove_default_ca_certs(distro_name, conf)
-
- mock_delete.assert_has_calls(
- [
- mock.call(conf["ca_cert_path"]),
- mock.call(conf["ca_cert_system_path"]),
- ]
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
)
- if conf["ca_cert_config"] is not None:
+ cc_ca_certs.disable_default_ca_certs(distro_name, conf)
+
+ if distro_name == "rhel":
+ mock_delete.assert_has_calls(
+ [
+ mock.call(conf["ca_cert_path"]),
+ mock.call(conf["ca_cert_local_path"]),
+ ]
+ )
+ self.assertEqual([], mock_subp.call_args_list)
+ elif distro_name in ["alpine", "debian", "ubuntu"]:
+ mock_load.assert_called_once_with(conf["ca_cert_config"])
mock_write.assert_called_once_with(
- conf["ca_cert_config"], "", mode=0o644
+ conf["ca_cert_config"], expected, omode="wb"
)
- if distro_name in ["debian", "ubuntu"]:
- mock_subp.assert_called_once_with(
- ("debconf-set-selections", "-"),
- "ca-certificates ca-certificates/trust_new_crts"
- " select no",
- )
+ if distro_name in ["debian", "ubuntu"]:
+ mock_subp.assert_called_once_with(
+ ("debconf-set-selections", "-"),
+ "ca-certificates ca-certificates/trust_new_crts"
+ " select no",
+ )
+ else:
+ assert mock_subp.call_count == 0
class TestCACertsSchema:
@@ -423,11 +373,10 @@ class TestCACertsSchema:
# Valid, yet deprecated schemas
(
{"ca-certs": {"remove-defaults": True}},
- "Cloud config schema deprecations: "
- "ca-certs: DEPRECATED. Dropped after April 2027. "
- "Use ``ca_certs``., "
- "ca-certs.remove-defaults: DEPRECATED. "
- "Dropped after April 2027. Use ``remove_defaults``.",
+ "Cloud config schema deprecations: ca-certs: "
+ "Deprecated in version 22.3. Use ``ca_certs`` instead.,"
+ " ca-certs.remove-defaults: Deprecated in version 22.3"
+ ". Use ``remove_defaults`` instead.",
),
# Invalid schemas
(
diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py
index 9c02a651..496ad8e1 100644
--- a/tests/unittests/config/test_cc_disk_setup.py
+++ b/tests/unittests/config/test_cc_disk_setup.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import random
+import tempfile
import pytest
@@ -102,7 +103,7 @@ class TestGetMbrHddSize(TestCase):
class TestGetPartitionMbrLayout(TestCase):
def test_single_partition_using_boolean(self):
self.assertEqual(
- "0,", cc_disk_setup.get_partition_mbr_layout(1000, True)
+ ",,83", cc_disk_setup.get_partition_mbr_layout(1000, True)
)
def test_single_partition_using_list(self):
@@ -200,6 +201,23 @@ class TestUpdateFsSetupDevices(TestCase):
)
+class TestPurgeDisk(TestCase):
+ @mock.patch(
+ "cloudinit.config.cc_disk_setup.read_parttbl", return_value=None
+ )
+ def test_purge_disk_ptable(self, *args):
+ pseudo_device = tempfile.NamedTemporaryFile()
+
+ cc_disk_setup.purge_disk_ptable(pseudo_device.name)
+
+ with pseudo_device as f:
+ actual = f.read()
+
+ expected = b"\0" * (1024 * 1024)
+
+ self.assertEqual(expected, actual)
+
+
@mock.patch(
"cloudinit.config.cc_disk_setup.assert_and_settle_device",
return_value=None,
@@ -211,7 +229,6 @@ class TestUpdateFsSetupDevices(TestCase):
@mock.patch("cloudinit.config.cc_disk_setup.device_type", return_value=None)
@mock.patch("cloudinit.config.cc_disk_setup.subp.subp", return_value=("", ""))
class TestMkfsCommandHandling(CiTestCase):
-
with_logs = True
def test_with_cmd(self, subp, *args):
@@ -321,5 +338,25 @@ class TestDebugSchema:
with pytest.raises(SchemaValidationError, match=error_msg):
validate_cloudconfig_schema(config, schema, strict=True)
-
-# vi: ts=4 expandtab
+ @pytest.mark.parametrize(
+ "config",
+ (
+ (
+ {
+ "disk_setup": {
+ "/dev/disk/by-id/google-home": {
+ "table_type": "gpt",
+ "layout": [
+ [100, "933AC7E1-2EB4-4F13-B844-0E14E2AEF915"]
+ ],
+ }
+ }
+ }
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_valid_schema(self, config):
+ """Assert expected schema validation and no error messages."""
+ schema = get_schema()
+ validate_cloudconfig_schema(config, schema, strict=True)
diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py
index c808333b..13622332 100644
--- a/tests/unittests/config/test_cc_growpart.py
+++ b/tests/unittests/config/test_cc_growpart.py
@@ -599,9 +599,10 @@ class TestGrowpartSchema:
pytest.raises(
SchemaValidationError,
match=(
- "deprecations: growpart.mode: DEPRECATED. Specifying"
- " a boolean ``false`` value for this key is"
- " deprecated. Use ``off`` instead."
+ "Cloud config schema deprecations: "
+ "growpart.mode: Changed in version 22.3. "
+ "Specifying a boolean ``false`` value for "
+ "``mode`` is deprecated. Use ``off`` instead."
),
),
),
diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py
index 0f9cc232..aa076d19 100644
--- a/tests/unittests/config/test_cc_grub_dpkg.py
+++ b/tests/unittests/config/test_cc_grub_dpkg.py
@@ -206,9 +206,10 @@ class TestGrubDpkgSchema:
pytest.raises(
SchemaValidationError,
match=(
- r"^Cloud config schema deprecations:"
- r" grub_dpkg.grub-pc/install_devices_empty:"
- r" DEPRECATED. Use a boolean value instead.$"
+ "Cloud config schema deprecations: "
+ "grub_dpkg.grub-pc/install_devices_empty: "
+ "Changed in version 22.3. Use a boolean value "
+ "instead."
),
),
False,
@@ -234,8 +235,9 @@ class TestGrubDpkgSchema:
pytest.raises(
SchemaValidationError,
match=(
- r"^Cloud config schema deprecations: grub-dpkg:"
- r" DEPRECATED. Use ``grub_dpkg`` instead$"
+ "Cloud config schema deprecations: grub-dpkg:"
+ " Deprecated in version 22.2. Use "
+ "``grub_dpkg`` instead."
),
),
False,
diff --git a/tests/unittests/config/test_cc_package_update_upgrade_install.py b/tests/unittests/config/test_cc_package_update_upgrade_install.py
index e8fce98f..07c5b932 100644
--- a/tests/unittests/config/test_cc_package_update_upgrade_install.py
+++ b/tests/unittests/config/test_cc_package_update_upgrade_install.py
@@ -21,25 +21,26 @@ class TestPackageUpdateUpgradeSchema:
(
{"apt_update": False},
(
- "deprecations: apt_update: DEPRECATED."
- " Dropped after April 2027. Use ``package_update``."
- " Default: ``false``"
+ "Cloud config schema deprecations: apt_update: "
+ "Default: ``false``. Deprecated in version 22.2. "
+ "Use ``package_update`` instead."
),
),
(
{"apt_upgrade": False},
(
- "deprecations: apt_upgrade: DEPRECATED."
- " Dropped after April 2027. Use ``package_upgrade``."
- " Default: ``false``"
+ "Cloud config schema deprecations: apt_upgrade: "
+ "Default: ``false``. Deprecated in version 22.2. "
+ "Use ``package_upgrade`` instead."
),
),
(
{"apt_reboot_if_required": False},
(
- "deprecations: apt_reboot_if_required: DEPRECATED."
- " Dropped after April 2027."
- " Use ``package_reboot_if_required``. Default: ``false``"
+ "Cloud config schema deprecations: "
+ "apt_reboot_if_required: Default: ``false``. "
+ "Deprecated in version 22.2. Use "
+ "``package_reboot_if_required`` instead."
),
),
],
diff --git a/tests/unittests/config/test_cc_power_state_change.py b/tests/unittests/config/test_cc_power_state_change.py
index 81750f5b..fbdc06ef 100644
--- a/tests/unittests/config/test_cc_power_state_change.py
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -180,18 +180,20 @@ class TestPowerStateChangeSchema:
(
{"power_state": {"mode": "halt", "delay": "5"}},
(
- "power_state.delay: DEPRECATED:"
- " Use of string for this value will be dropped after"
- " April 2027. Use ``now`` or integer type."
+ "Cloud config schema deprecations: "
+ "power_state.delay: Changed in version 22.3. Use "
+ "of type string for this value is deprecated. Use "
+ "``now`` or integer type."
),
),
({"power_state": {"mode": "halt", "delay": "now"}}, None),
(
{"power_state": {"mode": "halt", "delay": "+5"}},
(
- "power_state.delay: DEPRECATED:"
- " Use of string for this value will be dropped after"
- " April 2027. Use ``now`` or integer type."
+ "Cloud config schema deprecations: "
+ "power_state.delay: Changed in version 22.3. Use "
+ "of type string for this value is deprecated. Use "
+ "``now`` or integer type."
),
),
({"power_state": {"mode": "halt", "delay": "+"}}, ""),
diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py
index 27a49722..23461c2b 100644
--- a/tests/unittests/config/test_cc_puppet.py
+++ b/tests/unittests/config/test_cc_puppet.py
@@ -12,6 +12,7 @@ from cloudinit.config.schema import (
get_schema,
validate_cloudconfig_schema,
)
+from cloudinit.subp import ProcessExecutionError
from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from tests.unittests.util import get_cloud
@@ -25,67 +26,55 @@ def fake_tempdir(mocker, tmpdir):
).return_value.__enter__.return_value = str(tmpdir)
-@mock.patch("cloudinit.config.cc_puppet.subp.which")
@mock.patch("cloudinit.config.cc_puppet.subp.subp")
-@mock.patch("cloudinit.config.cc_puppet.os")
-class TestAutostartPuppet(CiTestCase):
- def test_wb_autostart_puppet_updates_puppet_default(
- self, m_os, m_subp, m_subpw
- ):
- """Update /etc/default/puppet to autostart if it exists."""
-
- def _fake_exists(path):
- return path == "/etc/default/puppet"
-
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
- self.assertEqual(
- [
- mock.call(
- [
- "sed",
- "-i",
- "-e",
- "s/^START=.*/START=yes/",
- "/etc/default/puppet",
- ],
- capture=False,
- )
- ],
- m_subp.call_args_list,
- )
+class TestManagePuppetServices(CiTestCase):
+ def setUp(self):
+ super(TestManagePuppetServices, self).setUp()
+ self.cloud = get_cloud()
- def test_wb_autostart_pupppet_enables_puppet_systemctl(
- self, m_os, m_subp, m_subpw
+ def test_wb_manage_puppet_services_enables_puppet_systemctl(
+ self,
+ m_subp,
):
- """If systemctl is present, enable puppet via systemctl."""
-
- m_os.path.exists.return_value = False
- m_subpw.return_value = "/usr/bin/systemctl"
- cc_puppet._autostart_puppet(LOG)
+ cc_puppet._manage_puppet_services(LOG, self.cloud, "enable")
expected_calls = [
- mock.call(["systemctl", "enable", "puppet.service"], capture=False)
+ mock.call(
+ ["systemctl", "enable", "puppet-agent.service"],
+ capture=True,
+ )
]
- self.assertEqual(expected_calls, m_subp.call_args_list)
+ self.assertIn(expected_calls, m_subp.call_args_list)
- def test_wb_autostart_pupppet_enables_puppet_chkconfig(
- self, m_os, m_subp, m_subpw
+ def test_wb_manage_puppet_services_starts_puppet_systemctl(
+ self,
+ m_subp,
):
- """If chkconfig is present, enable puppet via checkcfg."""
-
- def _fake_exists(path):
- return path == "/sbin/chkconfig"
+ cc_puppet._manage_puppet_services(LOG, self.cloud, "start")
+ expected_calls = [
+ mock.call(
+ ["systemctl", "start", "puppet-agent.service"],
+ capture=True,
+ )
+ ]
+ self.assertIn(expected_calls, m_subp.call_args_list)
- m_subpw.return_value = None
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
+ def test_enable_fallback_on_failure(self, m_subp):
+ m_subp.side_effect = (ProcessExecutionError, 0)
+ cc_puppet._manage_puppet_services(LOG, self.cloud, "enable")
expected_calls = [
- mock.call(["/sbin/chkconfig", "puppet", "on"], capture=False)
+ mock.call(
+ ["systemctl", "enable", "puppet-agent.service"],
+ capture=True,
+ ),
+ mock.call(
+ ["systemctl", "enable", "puppet.service"],
+ capture=True,
+ ),
]
self.assertEqual(expected_calls, m_subp.call_args_list)
-@mock.patch("cloudinit.config.cc_puppet._autostart_puppet")
+@mock.patch("cloudinit.config.cc_puppet._manage_puppet_services")
class TestPuppetHandle(CiTestCase):
with_logs = True
@@ -97,35 +86,36 @@ class TestPuppetHandle(CiTestCase):
self.csr_attributes_path = self.tmp_path("csr_attributes.yaml")
self.cloud = get_cloud()
- def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
+ def test_skips_missing_puppet_key_in_cloudconfig(self, m_man_puppet):
"""Cloud-config containing no 'puppet' key is skipped."""
cfg = {}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
self.assertIn("no 'puppet' configuration found", self.logs.getvalue())
- self.assertEqual(0, m_auto.call_count)
+ self.assertEqual(0, m_man_puppet.call_count)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
+ def test_puppet_config_starts_puppet_service(self, m_subp, m_man_puppet):
"""Cloud-config 'puppet' configuration starts puppet."""
cfg = {"puppet": {"install": False}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
- self.assertIn(
- [mock.call(["service", "puppet", "start"], capture=False)],
- m_subp.call_args_list,
- )
+ self.assertEqual(2, m_man_puppet.call_count)
+ expected_calls = [
+ mock.call(LOG, self.cloud, "enable"),
+ mock.call(LOG, self.cloud, "start"),
+ ]
+ self.assertEqual(expected_calls, m_man_puppet.call_args_list)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
+ def test_empty_puppet_config_installs_puppet(self, m_subp, m_man_puppet):
"""Cloud-config empty 'puppet' configuration installs latest puppet."""
self.cloud.distro = mock.MagicMock()
cfg = {"puppet": {}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
self.assertEqual(
- [mock.call(("puppet", None))],
+ [mock.call(("puppet-agent", None))],
self.cloud.distro.install_packages.call_args_list,
)
@@ -136,8 +126,8 @@ class TestPuppetHandle(CiTestCase):
self.cloud.distro = mock.MagicMock()
cfg = {"puppet": {"install": True}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(
- [mock.call(("puppet", None))],
+ self.assertIn(
+ [mock.call(("puppet-agent", None))],
self.cloud.distro.install_packages.call_args_list,
)
@@ -246,14 +236,14 @@ class TestPuppetHandle(CiTestCase):
cfg = {"puppet": {"version": "3.8"}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
self.assertEqual(
- [mock.call(("puppet", "3.8"))],
+ [mock.call(("puppet-agent", "3.8"))],
self.cloud.distro.install_packages.call_args_list,
)
@mock.patch("cloudinit.config.cc_puppet.get_config_value")
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
def test_puppet_config_updates_puppet_conf(
- self, m_subp, m_default, m_auto
+ self, m_subp, m_default, m_man_puppet
):
"""When 'conf' is provided update values in PUPPET_CONF_PATH."""
@@ -277,7 +267,7 @@ class TestPuppetHandle(CiTestCase):
@mock.patch("cloudinit.config.cc_puppet.get_config_value")
@mock.patch("cloudinit.config.cc_puppet.subp.subp")
def test_puppet_writes_csr_attributes_file(
- self, m_subp, m_default, m_auto
+ self, m_subp, m_default, m_man_puppet
):
"""When csr_attributes is provided
creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
@@ -321,44 +311,50 @@ class TestPuppetHandle(CiTestCase):
self.assertEqual(expected, content)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto):
+ def test_puppet_runs_puppet_if_requested(self, m_subp, m_man_puppet):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {"exec": True}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
+ self.assertEqual(2, m_man_puppet.call_count)
+ expected_calls = [
+ mock.call(LOG, self.cloud, "enable"),
+ mock.call(LOG, self.cloud, "start"),
+ ]
+ self.assertEqual(expected_calls, m_man_puppet.call_args_list)
self.assertIn(
[mock.call(["puppet", "agent", "--test"], capture=False)],
m_subp.call_args_list,
)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_puppet_starts_puppetd(self, m_subp, m_auto):
+ def test_puppet_starts_puppetd(self, m_subp, m_man_puppet):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
- self.assertIn(
- [mock.call(["service", "puppet", "start"], capture=False)],
- m_subp.call_args_list,
- )
+ self.assertEqual(2, m_man_puppet.call_count)
+ expected_calls = [
+ mock.call(LOG, self.cloud, "enable"),
+ mock.call(LOG, self.cloud, "start"),
+ ]
+ self.assertEqual(expected_calls, m_man_puppet.call_args_list)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_puppet_skips_puppetd(self, m_subp, m_auto):
+ def test_puppet_skips_puppetd(self, m_subp, m_man_puppet):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {"start_service": False}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(0, m_auto.call_count)
+ self.assertEqual(0, m_man_puppet.call_count)
self.assertNotIn(
- [mock.call(["service", "puppet", "start"], capture=False)],
+ [mock.call(["systemctl", "start", "puppet-agent"], capture=False)],
m_subp.call_args_list,
)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
def test_puppet_runs_puppet_with_args_list_if_requested(
- self, m_subp, m_auto
+ self, m_subp, m_man_puppet
):
"""Run puppet with 'exec_args' list if 'exec' is set to True."""
@@ -369,7 +365,7 @@ class TestPuppetHandle(CiTestCase):
}
}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
+ self.assertEqual(2, m_man_puppet.call_count)
self.assertIn(
[
mock.call(
@@ -382,7 +378,7 @@ class TestPuppetHandle(CiTestCase):
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
def test_puppet_runs_puppet_with_args_string_if_requested(
- self, m_subp, m_auto
+ self, m_subp, m_man_puppet
):
"""Run puppet with 'exec_args' string if 'exec' is set to True."""
@@ -393,7 +389,7 @@ class TestPuppetHandle(CiTestCase):
}
}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
+ self.assertEqual(2, m_man_puppet.call_count)
self.assertIn(
[
mock.call(
@@ -404,6 +400,48 @@ class TestPuppetHandle(CiTestCase):
m_subp.call_args_list,
)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_falls_back_to_older_name(self, m_subp, m_man_puppet):
+ cfg = {"puppet": {}}
+ with mock.patch(
+ "tests.unittests.util.MockDistro.install_packages"
+ ) as install_pkg:
+ # puppet-agent not installed, but puppet is
+ install_pkg.side_effect = (ProcessExecutionError, 0)
+
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ expected_calls = [
+ mock.call(LOG, self.cloud, "enable"),
+ mock.call(LOG, self.cloud, "start"),
+ ]
+ self.assertEqual(expected_calls, m_man_puppet.call_args_list)
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_with_conf_package_name_fails(self, m_subp, m_man_puppet):
+ cfg = {"puppet": {"package_name": "puppet"}}
+ with mock.patch(
+ "tests.unittests.util.MockDistro.install_packages"
+ ) as install_pkg:
+ # puppet-agent not installed, but puppet is
+ install_pkg.side_effect = (ProcessExecutionError, 0)
+ with pytest.raises(ProcessExecutionError):
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(0, m_man_puppet.call_count)
+ self.assertNotIn(
+ [
+ mock.call(
+ ["systemctl", "start", "puppet-agent"], capture=True
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_with_conf_package_name_success(self, m_subp, m_man_puppet):
+ cfg = {"puppet": {"package_name": "puppet"}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(2, m_man_puppet.call_count)
+
URL_MOCK = mock.Mock()
URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"'
diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py
index b46fab51..d7fda1b8 100644
--- a/tests/unittests/config/test_cc_resizefs.py
+++ b/tests/unittests/config/test_cc_resizefs.py
@@ -444,10 +444,12 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
@mock.patch("cloudinit.util.mount_is_read_write")
@mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
- def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+ @mock.patch("cloudinit.subp.subp")
+ def test_resize_btrfs_mount_is_ro(self, m_subp, m_is_dir, m_is_rw):
"""Do not resize / directly if it is read-only. (LP: #1734787)."""
m_is_rw.return_value = False
m_is_dir.return_value = True
+ m_subp.return_value = ("btrfs-progs v4.19 \n", "")
self.assertEqual(
("btrfs", "filesystem", "resize", "max", "//.snapshots"),
_resize_btrfs("/", "/dev/sda1"),
@@ -455,15 +457,32 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
@mock.patch("cloudinit.util.mount_is_read_write")
@mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
- def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+ @mock.patch("cloudinit.subp.subp")
+ def test_resize_btrfs_mount_is_rw(self, m_subp, m_is_dir, m_is_rw):
"""Do not resize / directly if it is read-only. (LP: #1734787)."""
m_is_rw.return_value = True
m_is_dir.return_value = True
+ m_subp.return_value = ("btrfs-progs v4.19 \n", "")
self.assertEqual(
("btrfs", "filesystem", "resize", "max", "/"),
_resize_btrfs("/", "/dev/sda1"),
)
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
+ @mock.patch("cloudinit.subp.subp")
+ def test_resize_btrfs_mount_is_rw_has_queue(
+ self, m_subp, m_is_dir, m_is_rw
+ ):
+        """Queue the resize request when btrfs-progs >= 5.10 (supports --enqueue)."""
+ m_is_rw.return_value = True
+ m_is_dir.return_value = True
+ m_subp.return_value = ("btrfs-progs v5.10 \n", "")
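+        # btrfs-progs >= 5.10 supports --enqueue, so the resize is queued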
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "--enqueue", "max", "/"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
@mock.patch("cloudinit.util.is_container", return_value=True)
@mock.patch("cloudinit.util.is_FreeBSD")
def test_maybe_get_writable_device_path_zfs_freebsd(
diff --git a/tests/unittests/config/test_cc_scripts_vendor.py b/tests/unittests/config/test_cc_scripts_vendor.py
index 1dcd0573..9d3e90e0 100644
--- a/tests/unittests/config/test_cc_scripts_vendor.py
+++ b/tests/unittests/config/test_cc_scripts_vendor.py
@@ -19,9 +19,10 @@ class TestScriptsVendorSchema:
pytest.raises(
SchemaValidationError,
match=(
- "deprecations: vendor_data.enabled: DEPRECATED."
- " Use of string for this value is DEPRECATED."
- " Use a boolean value instead."
+ "Cloud config schema deprecations: "
+ "vendor_data.enabled: Deprecated in version "
+ "22.3. Use of type string for this value is "
+ "deprecated. Use a boolean instead."
),
),
),
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
index 3d1d86ee..2c92949f 100644
--- a/tests/unittests/config/test_cc_set_hostname.py
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -5,6 +5,7 @@ import os
import shutil
import tempfile
from io import BytesIO
+from pathlib import Path
from unittest import mock
from configobj import ConfigObj
@@ -242,5 +243,21 @@ class TestHostname(t_help.FilesystemMockingTestCase):
str(ctx_mgr.exception),
)
+ def test_ignore_empty_previous_artifact_file(self):
+ cfg = {
+ "hostname": "blah",
+ "fqdn": "blah.blah.blah.yahoo.com",
+ }
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ prev_fn = Path(cc.get_cpath("data")) / "set-hostname"
+ prev_fn.touch()
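+        # An empty previous "set-hostname" artifact must not prevent
+        # writing the new hostname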
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("blah", contents.strip())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
index be26103f..a375c00b 100644
--- a/tests/unittests/config/test_cc_set_passwords.py
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -17,6 +17,11 @@ from tests.unittests.util import get_cloud
MODPATH = "cloudinit.config.cc_set_passwords."
LOG = logging.getLogger(__name__)
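+# Expected subp calls used below to assert how the SSH service is
+# checked and restarted under systemd and non-systemd distros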
+SYSTEMD_CHECK_CALL = mock.call(
+ ["systemctl", "show", "--property", "ActiveState", "--value", "ssh"]
+)
+SYSTEMD_RESTART_CALL = mock.call(["systemctl", "restart", "ssh"], capture=True)
+SERVICE_RESTART_CALL = mock.call(["service", "ssh", "restart"], capture=True)
@pytest.fixture(autouse=True)
@@ -26,52 +31,23 @@ def common_fixtures(mocker):
class TestHandleSSHPwauth:
- @pytest.mark.parametrize(
- "uses_systemd,cmd",
- (
- (True, ["systemctl", "status", "ssh"]),
- (False, ["service", "ssh", "status"]),
- ),
- )
@mock.patch("cloudinit.distros.subp.subp")
- def test_unknown_value_logs_warning(
- self, m_subp, uses_systemd, cmd, caplog
- ):
+ def test_unknown_value_logs_warning(self, m_subp, caplog):
cloud = get_cloud("ubuntu")
- with mock.patch.object(
- cloud.distro, "uses_systemd", return_value=uses_systemd
- ):
- setpass.handle_ssh_pwauth("floo", cloud.distro)
+ setpass.handle_ssh_pwauth("floo", cloud.distro)
assert "Unrecognized value: ssh_pwauth=floo" in caplog.text
- assert [mock.call(cmd, capture=True)] == m_subp.call_args_list
+ assert SYSTEMD_CHECK_CALL not in m_subp.call_args_list
+ assert SYSTEMD_RESTART_CALL not in m_subp.call_args_list
+ assert SERVICE_RESTART_CALL not in m_subp.call_args_list
@pytest.mark.parametrize(
- "uses_systemd,ssh_updated,cmd,expected_log",
+ "uses_systemd,ssh_updated,systemd_state",
(
- (
- True,
- True,
- ["systemctl", "restart", "ssh"],
- "Restarted the SSH daemon.",
- ),
- (
- True,
- False,
- ["systemctl", "status", "ssh"],
- "No need to restart SSH",
- ),
- (
- False,
- True,
- ["service", "ssh", "restart"],
- "Restarted the SSH daemon.",
- ),
- (
- False,
- False,
- ["service", "ssh", "status"],
- "No need to restart SSH",
- ),
+ (True, True, "activating"),
+ (True, True, "inactive"),
+ (True, False, None),
+ (False, True, None),
+ (False, False, None),
),
)
@mock.patch(f"{MODPATH}update_ssh_config")
@@ -82,23 +58,31 @@ class TestHandleSSHPwauth:
update_ssh_config,
uses_systemd,
ssh_updated,
- cmd,
- expected_log,
+ systemd_state,
caplog,
):
update_ssh_config.return_value = ssh_updated
+ m_subp.return_value = subp.SubpResult(systemd_state, "")
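+        # The mocked ActiveState value decides whether a restart is issued:
+        # "activating" triggers one, anything else does not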
cloud = get_cloud("ubuntu")
with mock.patch.object(
cloud.distro, "uses_systemd", return_value=uses_systemd
):
setpass.handle_ssh_pwauth(True, cloud.distro)
- if ssh_updated:
- m_subp.assert_called_with(cmd, capture=True)
+
+ if not ssh_updated:
+ assert "No need to restart SSH" in caplog.text
+ assert m_subp.call_args_list == []
+ elif uses_systemd:
+ assert SYSTEMD_CHECK_CALL in m_subp.call_args_list
+ assert SERVICE_RESTART_CALL not in m_subp.call_args_list
+ if systemd_state == "activating":
+ assert SYSTEMD_RESTART_CALL in m_subp.call_args_list
+ else:
+ assert SYSTEMD_RESTART_CALL not in m_subp.call_args_list
else:
- assert [mock.call(cmd, capture=True)] == m_subp.call_args_list
- assert expected_log in "\n".join(
- r.msg for r in caplog.records if r.levelname == "DEBUG"
- )
+ assert SERVICE_RESTART_CALL in m_subp.call_args_list
+ assert SYSTEMD_CHECK_CALL not in m_subp.call_args_list
+ assert SYSTEMD_RESTART_CALL not in m_subp.call_args_list
@mock.patch(f"{MODPATH}update_ssh_config", return_value=True)
@mock.patch("cloudinit.distros.subp.subp")
@@ -107,9 +91,9 @@ class TestHandleSSHPwauth:
update_ssh_config.assert_not_called()
cloud = get_cloud("ubuntu")
setpass.handle_ssh_pwauth("unchanged", cloud.distro)
- assert [
- mock.call(["systemctl", "status", "ssh"], capture=True)
- ] == m_subp.call_args_list
+ assert SYSTEMD_CHECK_CALL not in m_subp.call_args_list
+ assert SYSTEMD_RESTART_CALL not in m_subp.call_args_list
+ assert SERVICE_RESTART_CALL not in m_subp.call_args_list
@pytest.mark.allow_subp_for("systemctl")
@mock.patch("cloudinit.distros.subp.subp")
@@ -118,135 +102,13 @@ class TestHandleSSHPwauth:
cloud = get_cloud("ubuntu")
upname = f"{MODPATH}update_ssh_config"
optname = "PasswordAuthentication"
- for n, value in enumerate(util.FALSE_STRINGS + util.TRUE_STRINGS, 1):
+        for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
optval = "yes" if value in util.TRUE_STRINGS else "no"
with mock.patch(upname, return_value=False) as m_update:
setpass.handle_ssh_pwauth(value, cloud.distro)
assert (
mock.call({optname: optval}) == m_update.call_args_list[-1]
)
- assert m_subp.call_count == n
-
- @pytest.mark.parametrize(
- [
- "uses_systemd",
- "raised_error",
- "warning_log",
- "debug_logs",
- "update_ssh_call_count",
- ],
- (
- (
- True,
- subp.ProcessExecutionError(
- stderr="Service is not running.", exit_code=3
- ),
- None,
- [
- "Writing config 'ssh_pwauth: True'. SSH service"
- " 'ssh' will not be restarted because it is stopped.",
- "Not restarting SSH service: service is stopped.",
- ],
- 1,
- ),
- (
- True,
- subp.ProcessExecutionError(
- stderr="Service is not installed.", exit_code=4
- ),
- "Ignoring config 'ssh_pwauth: True'. SSH service 'ssh' is"
- " not installed.",
- [],
- 0,
- ),
- (
- True,
- subp.ProcessExecutionError(
- stderr="Service is not available.", exit_code=2
- ),
- "Ignoring config 'ssh_pwauth: True'. SSH service 'ssh'"
- " is not available. Error: ",
- [],
- 0,
- ),
- (
- False,
- subp.ProcessExecutionError(
- stderr="Service is not available.", exit_code=25
- ),
- None,
- [
- "Writing config 'ssh_pwauth: True'. SSH service"
- " 'ssh' will not be restarted because it is not running"
- " or not available.",
- "Not restarting SSH service: service is stopped.",
- ],
- 1,
- ),
- (
- False,
- subp.ProcessExecutionError(
- stderr="Service is not available.", exit_code=3
- ),
- None,
- [
- "Writing config 'ssh_pwauth: True'. SSH service"
- " 'ssh' will not be restarted because it is not running"
- " or not available.",
- "Not restarting SSH service: service is stopped.",
- ],
- 1,
- ),
- (
- False,
- subp.ProcessExecutionError(
- stderr="Service is not available.", exit_code=4
- ),
- None,
- [
- "Writing config 'ssh_pwauth: True'. SSH service"
- " 'ssh' will not be restarted because it is not running"
- " or not available.",
- "Not restarting SSH service: service is stopped.",
- ],
- 1,
- ),
- ),
- )
- @mock.patch(f"{MODPATH}update_ssh_config", return_value=True)
- @mock.patch("cloudinit.distros.subp.subp")
- def test_no_restart_when_service_is_not_running(
- self,
- m_subp,
- m_update_ssh_config,
- uses_systemd,
- raised_error,
- warning_log,
- debug_logs,
- update_ssh_call_count,
- caplog,
- ):
- """Write config but don't restart SSH service when not running."""
- cloud = get_cloud("ubuntu")
- cloud.distro.manage_service = mock.Mock(side_effect=raised_error)
- cloud.distro.uses_systemd = mock.Mock(return_value=uses_systemd)
-
- setpass.handle_ssh_pwauth(True, cloud.distro)
- logs_by_level = {logging.WARNING: [], logging.DEBUG: []}
- for _, level, msg in caplog.record_tuples:
- logs_by_level[level].append(msg)
- if warning_log:
- assert warning_log in "\n".join(
- logs_by_level[logging.WARNING]
- ), logs_by_level
- for debug_log in debug_logs:
- assert debug_log in logs_by_level[logging.DEBUG]
- assert [
- mock.call("status", "ssh")
- ] == cloud.distro.manage_service.call_args_list
- assert m_update_ssh_config.call_count == update_ssh_call_count
- assert m_subp.call_count == 0
- assert cloud.distro.uses_systemd.call_count == 1
def get_chpasswd_calls(cfg, cloud, log):
@@ -275,9 +137,9 @@ class TestSetPasswordsHandle:
"Leaving SSH config 'PasswordAuthentication' unchanged. "
"ssh_pwauth=None"
) in caplog.text
- assert [
- mock.call(["systemctl", "status", "ssh"], capture=True)
- ] == m_subp.call_args_list
+ assert SYSTEMD_CHECK_CALL not in m_subp.call_args_list
+ assert SYSTEMD_RESTART_CALL not in m_subp.call_args_list
+ assert SERVICE_RESTART_CALL not in m_subp.call_args_list
@mock.patch(f"{MODPATH}subp.subp")
def test_handle_on_chpasswd_list_parses_common_hashes(
@@ -380,7 +242,6 @@ class TestSetPasswordsHandle:
),
mock.call(["pw", "usermod", "ubuntu", "-p", "01-Jan-1970"]),
mock.call(["pw", "usermod", "sadegh", "-p", "01-Jan-1970"]),
- mock.call(["service", "sshd", "status"], capture=True),
] == m_subp.call_args_list
@pytest.mark.parametrize(
@@ -576,11 +437,6 @@ class TestSetPasswordsHandle:
def_1 = get_chpasswd_calls(list_def, cloud, LOG)
def_2 = get_chpasswd_calls(users_def, cloud, LOG)
assert def_1 == def_2
- assert def_1[-1] == mock.call(
- ["systemctl", "status", "ssh"], capture=True
- )
- for val in def_1:
- assert val
expire_cases = [
@@ -709,10 +565,9 @@ class TestSetPasswordsSchema:
pytest.raises(
SchemaValidationError,
match=(
- "deprecations: ssh_pwauth: DEPRECATED. Use of"
- " non-boolean values for this field is DEPRECATED and"
- " will result in an error in a future version of"
- " cloud-init."
+ "Cloud config schema deprecations: ssh_pwauth:"
+ " Changed in version 22.3. Use of non-boolean"
+ " values for this field is deprecated."
),
),
),
@@ -721,16 +576,17 @@ class TestSetPasswordsSchema:
pytest.raises(
SchemaValidationError,
match=(
- "deprecations: ssh_pwauth: DEPRECATED. Use of"
- " non-boolean values for this field is DEPRECATED and"
- " will result in an error in a future version of"
- " cloud-init."
+ "Cloud config schema deprecations: ssh_pwauth:"
+ " Changed in version 22.3. Use of non-boolean"
+ " values for this field is deprecated."
),
),
),
(
{"chpasswd": {"list": "blah"}},
- pytest.raises(SchemaValidationError, match="DEPRECATED"),
+ pytest.raises(
+ SchemaValidationError, match="Deprecated in version"
+ ),
),
# Valid combinations
(
@@ -843,7 +699,9 @@ class TestSetPasswordsSchema:
# Test regex
(
{"chpasswd": {"list": ["user:pass"]}},
- pytest.raises(SchemaValidationError, match="DEPRECATED"),
+ pytest.raises(
+ SchemaValidationError, match="Deprecated in version"
+ ),
),
# Test valid
({"password": "pass"}, does_not_raise()),
diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py
index 8f2ca8bf..66368d0f 100644
--- a/tests/unittests/config/test_cc_ssh.py
+++ b/tests/unittests/config/test_cc_ssh.py
@@ -311,6 +311,7 @@ class TestHandleSsh:
cfg = {"ssh_keys": {}}
expected_calls = []
+ cert_content = ""
for key_type in cc_ssh.GENERATE_KEY_NAMES:
private_name = "{}_private".format(key_type)
public_name = "{}_public".format(key_type)
@@ -330,26 +331,32 @@ class TestHandleSsh:
mock.call(
"/etc/ssh/ssh_host_{}_key".format(key_type),
private_value,
- 384,
+ 0o600,
),
mock.call(
"/etc/ssh/ssh_host_{}_key.pub".format(key_type),
public_value,
- 384,
+ 0o644,
),
mock.call(
"/etc/ssh/ssh_host_{}_key-cert.pub".format(key_type),
cert_value,
- 384,
- ),
- mock.call(
- sshd_conf_fname,
- "HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub"
- "\n".format(key_type),
- preserve_mode=True,
+ 0o644,
),
]
)
+ cert_content += (
+ f"HostCertificate /etc/ssh/ssh_host_{key_type}_key-cert.pub\n"
+ )
+
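+        # All HostCertificate lines are appended to sshd_config in a single
+        # write (omode="ab")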
+ expected_calls.append(
+ mock.call(
+ sshd_conf_fname,
+ cert_content,
+ omode="ab",
+ preserve_mode=True,
+ )
+ )
# Run the handler.
m_nug.return_value = ([], {})
diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py
index d48656f7..6ee6f197 100644
--- a/tests/unittests/config/test_cc_update_etc_hosts.py
+++ b/tests/unittests/config/test_cc_update_etc_hosts.py
@@ -87,9 +87,10 @@ class TestUpdateEtcHosts:
pytest.raises(
SchemaValidationError,
match=(
- "deprecations: manage_etc_hosts: DEPRECATED. Value"
- " ``template`` will be dropped after April 2027."
- " Use ``true`` instead"
+ "Cloud config schema deprecations: "
+ "manage_etc_hosts: Changed in version 22.3. "
+ "Use of ``template`` is deprecated, use "
+ "``true`` instead."
),
),
),
diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py
index 00eca93b..6a026a87 100644
--- a/tests/unittests/config/test_cc_users_groups.py
+++ b/tests/unittests/config/test_cc_users_groups.py
@@ -87,7 +87,9 @@ class TestHandleUsersGroups(CiTestCase):
m_linux_group,
):
"""When users in config, create users with freebsd.create_user."""
- cfg = {"users": ["default", {"name": "me2"}]} # merged cloud-config
+ cfg = {
+ "users": ["default", {"name": "me2", "uid": 1234}]
+ } # merged cloud-config
# System config defines a default user for the distro.
sys_cfg = {
"default_user": {
@@ -115,7 +117,7 @@ class TestHandleUsersGroups(CiTestCase):
lock_passwd=True,
shell="/bin/tcsh",
),
- mock.call("me2", default=False),
+ mock.call("me2", uid=1234, default=False),
],
)
m_fbsd_group.assert_not_called()
@@ -363,9 +365,10 @@ class TestUsersGroupsSchema:
pytest.raises(
SchemaValidationError,
match=(
- "users.0.lock-passwd: DEPRECATED."
- " Dropped after April 2027. Use ``lock_passwd``."
- " Default: ``true``"
+ "Cloud config schema deprecations: "
+ "users.0.lock-passwd: Default: ``true`` "
+ "Deprecated in version 22.3. Use "
+ "``lock_passwd`` instead."
),
),
False,
@@ -385,10 +388,14 @@ class TestUsersGroupsSchema:
pytest.raises(
SchemaValidationError,
match=(
- "Cloud config schema deprecations: users.0.groups.adm:"
- " DEPRECATED. When providing an object for"
- " users.groups the ``<group_name>`` keys are the"
- " groups to add this user to,"
+ "Cloud config schema deprecations: "
+ "users.0.groups.adm: When providing an object "
+ "for users.groups the ``<group_name>`` keys "
+ "are the groups to add this user to Deprecated"
+ " in version 23.1., users.0.groups.sudo: When "
+ "providing an object for users.groups the "
+ "``<group_name>`` keys are the groups to add "
+ "this user to Deprecated in version 23.1."
),
),
False,
@@ -438,10 +445,11 @@ class TestUsersGroupsSchema:
pytest.raises(
SchemaValidationError,
match=(
- "deprecations: user.groups.sbuild: DEPRECATED. "
- "When providing an object for users.groups the "
- "``<group_name>`` keys are the groups to add this "
- "user to"
+ "Cloud config schema deprecations: "
+ "user.groups.sbuild: When providing an object "
+ "for users.groups the ``<group_name>`` keys "
+ "are the groups to add this user to Deprecated"
+ " in version 23.1."
),
),
False,
@@ -451,9 +459,10 @@ class TestUsersGroupsSchema:
pytest.raises(
SchemaValidationError,
match=(
- "deprecations: user.sudo: DEPRECATED. The value"
- " ``false`` will be dropped after April 2027."
- " Use ``null`` or no ``sudo`` key instead."
+ "Cloud config schema deprecations: user.sudo:"
+ " Changed in version 22.2. The value "
+ "``false`` is deprecated for this key, use "
+ "``null`` instead."
),
),
False,
@@ -468,9 +477,10 @@ class TestUsersGroupsSchema:
pytest.raises(
SchemaValidationError,
match=(
- "users.0.uid: DEPRECATED. The use of ``string`` type"
- " will be dropped after April 2027. Use an ``integer``"
- " instead."
+ "Cloud config schema deprecations: "
+ "users.0.uid: Changed in version 22.3. The "
+ "use of ``string`` type is deprecated. Use "
+ "an ``integer`` instead."
),
),
False,
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
index 50128f2c..d43af5cc 100644
--- a/tests/unittests/config/test_schema.py
+++ b/tests/unittests/config/test_schema.py
@@ -9,6 +9,7 @@ import logging
import os
import re
import sys
+import unittest
from collections import namedtuple
from copy import deepcopy
from pathlib import Path
@@ -17,7 +18,6 @@ from types import ModuleType
from typing import List, Optional, Sequence, Set
import pytest
-import responses
from cloudinit import stages
from cloudinit.config.schema import (
@@ -147,6 +147,54 @@ class TestVersionedSchemas:
)
+class TestCheckSchema(unittest.TestCase):
+ def test_schema_bools_have_dates(self):
+ """ensure that new/changed/deprecated keys have an associated
+ version key
+ """
+
+ def check_deprecation_keys(schema, search_key):
+ if search_key in schema:
+ assert f"{search_key}_version" in schema
+ for sub_item in schema.values():
+ if isinstance(sub_item, dict):
+ check_deprecation_keys(sub_item, search_key)
+ return True
+
+ # ensure that check_deprecation_keys works as expected
+ assert check_deprecation_keys(
+ {"changed": True, "changed_version": "22.3"}, "changed"
+ )
+ assert check_deprecation_keys(
+ {"properties": {"deprecated": True, "deprecated_version": "22.3"}},
+ "deprecated",
+ )
+ assert check_deprecation_keys(
+ {
+ "properties": {
+ "properties": {"new": True, "new_version": "22.3"}
+ }
+ },
+ "new",
+ )
+ with self.assertRaises(AssertionError):
+ check_deprecation_keys({"changed": True}, "changed")
+ with self.assertRaises(AssertionError):
+ check_deprecation_keys(
+ {"properties": {"deprecated": True}}, "deprecated"
+ )
+ with self.assertRaises(AssertionError):
+ check_deprecation_keys(
+ {"properties": {"properties": {"new": True}}}, "new"
+ )
+
+ # test the in-repo schema
+ schema = get_schema()
+ assert check_deprecation_keys(schema, "new")
+ assert check_deprecation_keys(schema, "changed")
+ assert check_deprecation_keys(schema, "deprecated")
+
+
class TestGetSchema:
def test_static_schema_file_is_valid(self, caplog):
with caplog.at_level(logging.WARNING):
@@ -282,7 +330,7 @@ class TestValidateCloudConfigSchema:
((None, 1), ({"properties": {"p1": {"type": "string"}}}, 0)),
)
@skipUnlessJsonSchema()
- @mock.patch("cloudinit.config.schema.get_schema")
+ @mock.patch(M_PATH + "get_schema")
def test_validateconfig_schema_use_full_schema_when_no_schema_param(
self, get_schema, schema, call_count
):
@@ -403,13 +451,17 @@ class TestValidateCloudConfigSchema:
"a-b": {
"type": "string",
"deprecated": True,
+ "deprecated_version": "22.1",
+ "new": True,
+ "new_version": "22.1",
"description": "<desc>",
},
"a_b": {"type": "string", "description": "noop"},
},
},
{"a-b": "asdf"},
- "Deprecated cloud-config provided:\na-b: DEPRECATED: <desc>",
+ "Deprecated cloud-config provided:\na-b: <desc> "
+ "Deprecated in version 22.1.",
),
(
{
@@ -421,6 +473,7 @@ class TestValidateCloudConfigSchema:
{
"type": "string",
"deprecated": True,
+ "deprecated_version": "22.1",
"description": "<desc>",
},
]
@@ -428,7 +481,8 @@ class TestValidateCloudConfigSchema:
},
},
{"x": "+5"},
- "Deprecated cloud-config provided:\nx: DEPRECATED: <desc>",
+ "Deprecated cloud-config provided:\nx: <desc> "
+ "Deprecated in version 22.1.",
),
(
{
@@ -439,6 +493,8 @@ class TestValidateCloudConfigSchema:
{"type": "string", "description": "noop"},
{
"deprecated": True,
+ "deprecated_version": "22.1",
+ "deprecated_description": "<dep desc>",
"description": "<desc>",
},
]
@@ -446,7 +502,8 @@ class TestValidateCloudConfigSchema:
},
},
{"x": "5"},
- "Deprecated cloud-config provided:\nx: DEPRECATED: <desc>",
+ "Deprecated cloud-config provided:\nx: <desc> "
+ "Deprecated in version 22.1. <dep desc>",
),
(
{
@@ -458,6 +515,7 @@ class TestValidateCloudConfigSchema:
{
"type": "string",
"deprecated": True,
+ "deprecated_version": "22.1",
"description": "<desc>",
},
]
@@ -465,7 +523,8 @@ class TestValidateCloudConfigSchema:
},
},
{"x": "5"},
- "Deprecated cloud-config provided:\nx: DEPRECATED: <desc>",
+ "Deprecated cloud-config provided:\nx: <desc> "
+ "Deprecated in version 22.1.",
),
(
{
@@ -474,12 +533,14 @@ class TestValidateCloudConfigSchema:
"x": {
"type": "string",
"deprecated": True,
+ "deprecated_version": "22.1",
"description": "<desc>",
},
},
},
{"x": "+5"},
- "Deprecated cloud-config provided:\nx: DEPRECATED: <desc>",
+ "Deprecated cloud-config provided:\nx: <desc> "
+ "Deprecated in version 22.1.",
),
(
{
@@ -501,6 +562,7 @@ class TestValidateCloudConfigSchema:
"$defs": {
"my_ref": {
"deprecated": True,
+ "deprecated_version": "32.3",
"description": "<desc>",
}
},
@@ -514,7 +576,8 @@ class TestValidateCloudConfigSchema:
},
},
{"x": "+5"},
- "Deprecated cloud-config provided:\nx: DEPRECATED: <desc>",
+ "Deprecated cloud-config provided:\nx: <desc> "
+ "Deprecated in version 32.3.",
),
(
{
@@ -522,6 +585,7 @@ class TestValidateCloudConfigSchema:
"$defs": {
"my_ref": {
"deprecated": True,
+ "deprecated_version": "27.2",
}
},
"properties": {
@@ -537,7 +601,8 @@ class TestValidateCloudConfigSchema:
},
},
{"x": "+5"},
- "Deprecated cloud-config provided:\nx: DEPRECATED.",
+ "Deprecated cloud-config provided:\nx: Deprecated in "
+ "version 27.2.",
),
(
{
@@ -546,12 +611,14 @@ class TestValidateCloudConfigSchema:
"^.+$": {
"minItems": 1,
"deprecated": True,
+ "deprecated_version": "27.2",
"description": "<desc>",
}
},
},
{"a-b": "asdf"},
- "Deprecated cloud-config provided:\na-b: DEPRECATED: <desc>",
+ "Deprecated cloud-config provided:\na-b: <desc> "
+ "Deprecated in version 27.2.",
),
pytest.param(
{
@@ -560,11 +627,17 @@ class TestValidateCloudConfigSchema:
"^.+$": {
"minItems": 1,
"deprecated": True,
+ "deprecated_version": "27.2",
+ "changed": True,
+ "changed_version": "22.2",
+ "changed_description": "Drop ballast.",
}
},
},
{"a-b": "asdf"},
- "Deprecated cloud-config provided:\na-b: DEPRECATED.",
+ "Deprecated cloud-config provided:\na-b: Deprecated "
+ "in version 27.2.\na-b: Changed in version 22.2. "
+ "Drop ballast.",
id="deprecated_pattern_property_without_description",
),
],
@@ -638,14 +711,6 @@ class TestValidateCloudConfigFile:
"""Tests for validate_cloudconfig_file."""
@pytest.mark.parametrize("annotate", (True, False))
- def test_validateconfig_file_error_on_absent_file(self, annotate):
- """On absent config_path, validate_cloudconfig_file errors."""
- with pytest.raises(
- RuntimeError, match="Configfile /not/here does not exist"
- ):
- validate_cloudconfig_file("/not/here", {}, annotate)
-
- @pytest.mark.parametrize("annotate", (True, False))
def test_validateconfig_file_error_on_invalid_header(
self, annotate, tmpdir
):
@@ -708,59 +773,10 @@ class TestValidateCloudConfigFile:
validate_cloudconfig_file(config_file.strpath, schema, annotate)
@skipUnlessJsonSchema()
- @responses.activate
- @pytest.mark.parametrize("annotate", (True, False))
- @mock.patch("cloudinit.url_helper.time.sleep")
- @mock.patch(M_PATH + "os.getuid", return_value=0)
- def test_validateconfig_file_include_validates_schema(
- self, m_getuid, m_sleep, annotate, mocker
- ):
- """validate_cloudconfig_file raises errors on invalid schema
- when user-data uses `#include`."""
- schema = {"properties": {"p1": {"type": "string", "format": "string"}}}
- included_data = "#cloud-config\np1: -1"
- included_url = "http://asdf/user-data"
- blob = f"#include {included_url}"
- responses.add(responses.GET, included_url, included_data)
-
- ci = stages.Init()
- ci.datasource = FakeDataSource(blob)
- mocker.patch(M_PATH + "Init", return_value=ci)
-
- error_msg = (
- "Cloud config schema errors: p1: -1 is not of type 'string'"
- )
- with pytest.raises(SchemaValidationError, match=error_msg):
- validate_cloudconfig_file(None, schema, annotate)
-
- @skipUnlessJsonSchema()
- @responses.activate
- @pytest.mark.parametrize("annotate", (True, False))
- @mock.patch("cloudinit.url_helper.time.sleep")
- @mock.patch(M_PATH + "os.getuid", return_value=0)
- def test_validateconfig_file_include_success(
- self, m_getuid, m_sleep, annotate, mocker
- ):
- """validate_cloudconfig_file raises errors on invalid schema
- when user-data uses `#include`."""
- schema = {"properties": {"p1": {"type": "string", "format": "string"}}}
- included_data = "#cloud-config\np1: asdf"
- included_url = "http://asdf/user-data"
- blob = f"#include {included_url}"
- responses.add(responses.GET, included_url, included_data)
-
- ci = stages.Init()
- ci.datasource = FakeDataSource(blob)
- mocker.patch(M_PATH + "Init", return_value=ci)
-
- validate_cloudconfig_file(None, schema, annotate)
-
- @skipUnlessJsonSchema()
@pytest.mark.parametrize("annotate", (True, False))
@mock.patch("cloudinit.url_helper.time.sleep")
- @mock.patch(M_PATH + "os.getuid", return_value=0)
def test_validateconfig_file_no_cloud_cfg(
- self, m_getuid, m_sleep, annotate, capsys, mocker
+ self, m_sleep, annotate, capsys, mocker
):
"""validate_cloudconfig_file does noop with empty user-data."""
schema = {"properties": {"p1": {"type": "string", "format": "string"}}}
@@ -769,15 +785,18 @@ class TestValidateCloudConfigFile:
ci = stages.Init()
ci.datasource = FakeDataSource(blob)
mocker.patch(M_PATH + "Init", return_value=ci)
+ cloud_config_file = ci.paths.get_ipath_cur("cloud_config")
+ write_file(cloud_config_file, b"")
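+        # The instance's cloud_config file exists but is empty, so the
+        # header error reports its real path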
with pytest.raises(
SchemaValidationError,
match=re.escape(
- "Cloud config schema errors: format-l1.c1: File None needs"
- ' to begin with "#cloud-config"'
+ "Cloud config schema errors: format-l1.c1:"
+ f" File {cloud_config_file} needs to begin with"
+ ' "#cloud-config"'
),
):
- validate_cloudconfig_file(None, schema, annotate)
+ validate_cloudconfig_file(cloud_config_file, schema, annotate)
class TestSchemaDocMarkdown:
@@ -847,7 +866,7 @@ class TestSchemaDocMarkdown:
**Supported distros:** debian, rhel
**Config schema**:
- **prop1:** (array of integer) prop-description.
+ **prop1:** (array of integer) prop-description
**Examples**::
@@ -867,12 +886,12 @@ class TestSchemaDocMarkdown:
"properties": {
"prop1": {
"type": "array",
- "description": "prop-description",
+ "description": "prop-description.",
"items": {"type": "string"},
},
"prop2": {
"type": "boolean",
- "description": "prop2-description",
+ "description": "prop2-description.",
},
},
}
@@ -947,7 +966,7 @@ class TestSchemaDocMarkdown:
"patternProperties": {
"^.+$": {
"label": "<opaque_label>",
- "description": "List of cool strings",
+ "description": "List of cool strings.",
"type": "array",
"items": {"type": "string"},
"minItems": 1,
@@ -1101,7 +1120,7 @@ class TestSchemaDocMarkdown:
"properties": {
"prop1": {
"type": "array",
- "description": "prop-description",
+ "description": "prop-description.",
"items": {"type": "integer"},
}
},
@@ -1157,7 +1176,7 @@ class TestSchemaDocMarkdown:
- option2
- option3
- The default value is option1.
+ The default value is option1
"""
)
@@ -1268,7 +1287,29 @@ class TestSchemaDocMarkdown:
}
}
},
- "**prop1:** (string/integer) DEPRECATED: <description>",
+ "**prop1:** (string/integer) <description>\n\n "
+ "*Deprecated in version <missing deprecated_version "
+ "key, please file a bug report>.*",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "prop1": {
+ "type": ["string", "integer"],
+ "description": "<description>",
+ "deprecated": True,
+ "deprecated_version": "2",
+ "changed": True,
+ "changed_version": "1",
+ "new": True,
+ "new_version": "1",
+ },
+ },
+ },
+ "**prop1:** (string/integer) <description>\n\n "
+ "*Deprecated in version 2.*\n\n *Changed in version"
+ " 1.*\n\n *New in version 1.*",
),
(
{
@@ -1278,10 +1319,20 @@ class TestSchemaDocMarkdown:
"type": ["string", "integer"],
"description": "<description>",
"deprecated": True,
+ "deprecated_version": "2",
+ "deprecated_description": "dep",
+ "changed": True,
+ "changed_version": "1",
+ "changed_description": "chg",
+ "new": True,
+ "new_version": "1",
+ "new_description": "new",
},
},
},
- "**prop1:** (string/integer) DEPRECATED: <description>",
+ "**prop1:** (string/integer) <description>\n\n "
+ "*Deprecated in version 2. dep*\n\n *Changed in "
+ "version 1. chg*\n\n *New in version 1. new*",
),
(
{
@@ -1299,7 +1350,9 @@ class TestSchemaDocMarkdown:
}
},
},
- "**prop1:** (string/integer) DEPRECATED: <description>",
+ "**prop1:** (string/integer) <description>\n\n "
+ "*Deprecated in version <missing deprecated_version "
+ "key, please file a bug report>.*",
),
(
{
@@ -1319,7 +1372,9 @@ class TestSchemaDocMarkdown:
}
},
},
- "**prop1:** (string/integer) DEPRECATED: <description>",
+ "**prop1:** (string/integer) <description>\n\n "
+ "*Deprecated in version <missing deprecated_version "
+ "key, please file a bug report>.*",
),
(
{
@@ -1330,14 +1385,17 @@ class TestSchemaDocMarkdown:
"anyOf": [
{
"type": ["string", "integer"],
- "description": "<deprecated_description>",
+ "description": "<deprecated_description>.",
"deprecated": True,
},
],
},
},
},
- "**prop1:** (UNDEFINED) <description>. DEPRECATED: <deprecat",
+ "**prop1:** (UNDEFINED) <description>. "
+ "<deprecated_description>.\n\n *Deprecated in "
+ "version <missing deprecated_version key, please "
+ "file a bug report>.*",
),
(
{
@@ -1347,7 +1405,7 @@ class TestSchemaDocMarkdown:
"anyOf": [
{
"type": ["string", "integer"],
- "description": "<deprecated_description>",
+ "description": "<deprecated_description>.",
"deprecated": True,
},
{
@@ -1358,8 +1416,9 @@ class TestSchemaDocMarkdown:
},
},
},
- "**prop1:** (number) <description>. DEPRECATED:"
- " <deprecated_description>",
+ "**prop1:** (number) <deprecated_description>.\n\n"
+ " *Deprecated in version <missing "
+ "deprecated_version key, please file a bug report>.*",
),
(
{
@@ -1371,6 +1430,7 @@ class TestSchemaDocMarkdown:
"type": ["string", "integer"],
"description": "<deprecated_description>",
"deprecated": True,
+ "deprecated_version": "22.1",
},
{
"type": "string",
@@ -1381,8 +1441,9 @@ class TestSchemaDocMarkdown:
},
},
},
- "**prop1:** (``none``/``unchanged``/``os``) <description>."
- " DEPRECATED: <deprecated_description>.",
+ "**prop1:** (``none``/``unchanged``/``os``) "
+ "<description>. <deprecated_description>\n\n "
+ "*Deprecated in version 22.1.*",
),
(
{
@@ -1403,8 +1464,9 @@ class TestSchemaDocMarkdown:
},
},
},
- "**prop1:** (string/integer/``none``/``unchanged``/``os``)"
- " <description_1>. <description>_2.\n",
+ "**prop1:** (string/integer/``none``/"
+ "``unchanged``/``os``) <description_1>. "
+ "<description>_2\n",
),
(
{
@@ -1425,7 +1487,7 @@ class TestSchemaDocMarkdown:
},
},
},
- "**prop1:** (array of object) <desc_1>.\n",
+ "**prop1:** (array of object) <desc_1>\n",
),
],
)
@@ -1582,7 +1644,7 @@ class TestMain:
main()
assert 1 == context_manager.value.code
_out, err = capsys.readouterr()
- assert "Error:\nConfigfile NOT_A_FILE does not exist\n" == err
+ assert "Error: Config file NOT_A_FILE does not exist\n" == err
def test_main_invalid_flag_combo(self, capsys):
"""Main exits non-zero when invalid flag combo used."""
@@ -1614,24 +1676,48 @@ class TestMain:
with mock.patch("sys.argv", myargs):
assert 0 == main(), "Expected 0 exit code"
out, _err = capsys.readouterr()
- assert "Valid cloud-config: {0}\n".format(myyaml) == out
+ assert f"Valid cloud-config: {myyaml}\n" == out
@mock.patch(M_PATH + "os.getuid", return_value=0)
- def test_main_validates_system_userdata(
- self, m_getuid, capsys, mocker, paths
+ def test_main_validates_system_userdata_and_vendordata(
+ self, _getuid, capsys, mocker, paths
):
"""When --system is provided, main validates system userdata."""
m_init = mocker.patch(M_PATH + "Init")
m_init.return_value.paths.get_ipath = paths.get_ipath_cur
cloud_config_file = paths.get_ipath_cur("cloud_config")
write_file(cloud_config_file, b"#cloud-config\nntp:")
+ vd_file = paths.get_ipath_cur("vendor_cloud_config")
+ write_file(vd_file, b"#cloud-config\nssh_import_id: [me]")
+ vd2_file = paths.get_ipath_cur("vendor2_cloud_config")
+ write_file(vd2_file, b"#cloud-config\nssh_pw_auth: true")
myargs = ["mycmd", "--system"]
with mock.patch("sys.argv", myargs):
- assert 0 == main(), "Expected 0 exit code"
+ main()
out, _err = capsys.readouterr()
- assert "Valid cloud-config: system userdata\n" == out
- @mock.patch("cloudinit.config.schema.os.getuid", return_value=1000)
+ expected = dedent(
+ """\
+ Found cloud-config data types: user-data, vendor-data, vendor2-data
+
+ 1. user-data at {ud_file}:
+ Valid cloud-config: user-data
+
+ 2. vendor-data at {vd_file}:
+ Valid cloud-config: vendor-data
+
+ 3. vendor2-data at {vd2_file}:
+ Valid cloud-config: vendor2-data
+ """
+ )
+ assert (
+ expected.format(
+ ud_file=cloud_config_file, vd_file=vd_file, vd2_file=vd2_file
+ )
+ == out
+ )
+
+ @mock.patch(M_PATH + "os.getuid", return_value=1000)
def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths):
"""Non-root user can't use --system param"""
myargs = ["mycmd", "--system"]
@@ -1641,8 +1727,8 @@ class TestMain:
assert 1 == context_manager.value.code
_out, err = capsys.readouterr()
expected = (
- "Error:\nUnable to read system userdata as non-root user. "
- "Try using sudo\n"
+ "Error:\nUnable to read system userdata or vendordata as non-root"
+ " user. Try using sudo.\n"
)
assert expected == err
@@ -1784,17 +1870,16 @@ class TestHandleSchemaArgs:
#cloud-config
packages:
- htop
- apt_update: true # D1
- apt_upgrade: true # D2
- apt_reboot_if_required: true # D3
+ apt_update: true # D1
+ apt_upgrade: true # D2
+ apt_reboot_if_required: true # D3
# Deprecations: -------------
- # D1: DEPRECATED: Dropped after April 2027. Use ``package_update``. Default: ``false``
- # D2: DEPRECATED: Dropped after April 2027. Use ``package_upgrade``. Default: ``false``
- # D3: DEPRECATED: Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``
-
+ # D1: Default: ``false``. Deprecated in version 22.2. Use ``package_update`` instead.
+ # D2: Default: ``false``. Deprecated in version 22.2. Use ``package_upgrade`` instead.
+ # D3: Default: ``false``. Deprecated in version 22.2. Use ``package_reboot_if_required`` instead.
- Valid cloud-config: {}
+ Valid cloud-config: {cfg_file}
""" # noqa: E501
),
),
@@ -1803,10 +1888,12 @@ class TestHandleSchemaArgs:
dedent(
"""\
Cloud config schema deprecations: \
-apt_reboot_if_required: DEPRECATED: Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``, \
-apt_update: DEPRECATED: Dropped after April 2027. Use ``package_update``. Default: ``false``, \
-apt_upgrade: DEPRECATED: Dropped after April 2027. Use ``package_upgrade``. Default: ``false``
- Valid cloud-config: {}
+apt_reboot_if_required: Default: ``false``. Deprecated in version 22.2.\
+ Use ``package_reboot_if_required`` instead., apt_update: Default: \
+``false``. Deprecated in version 22.2. Use ``package_update`` instead.,\
+ apt_upgrade: Default: ``false``. Deprecated in version 22.2. Use \
+``package_upgrade`` instead.\
+ Valid cloud-config: {cfg_file}
""" # noqa: E501
),
),
@@ -1837,6 +1924,9 @@ apt_upgrade: DEPRECATED: Dropped after April 2027. Use ``package_upgrade``. Defa
)
handle_schema_args("unused", args)
out, err = capsys.readouterr()
- assert expected_output.format(user_data_fn) == out
+ assert (
+ expected_output.format(cfg_file=user_data_fn).split()
+ == out.split()
+ )
assert not err
assert "deprec" not in caplog.text
diff --git a/tests/unittests/conftest.py b/tests/unittests/conftest.py
index 1ab17e8b..a3daaf22 100644
--- a/tests/unittests/conftest.py
+++ b/tests/unittests/conftest.py
@@ -2,6 +2,7 @@ import builtins
import glob
import os
from pathlib import Path
+from unittest import mock
import pytest
@@ -58,6 +59,21 @@ def fake_filesystem(mocker, tmpdir):
mocker.patch.object(mod, f, trap_func)
+@pytest.fixture(autouse=True)
+def disable_dns_lookup(request):
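+    # Guard against accidental DNS lookups in tests; opt out with the
+    # "allow_dns_lookup" marker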
+ if "allow_dns_lookup" in request.keywords:
+ yield
+ return
+
+ def side_effect(args, *other_args, **kwargs):
+ raise AssertionError("Unexpectedly used util.is_resolvable")
+
+ with mock.patch(
+ "cloudinit.util.is_resolvable", side_effect=side_effect, autospec=True
+ ):
+ yield
+
+
PYTEST_VERSION_TUPLE = tuple(map(int, pytest.__version__.split(".")))
if PYTEST_VERSION_TUPLE < (3, 9, 0):
diff --git a/tests/unittests/distros/test__init__.py b/tests/unittests/distros/test__init__.py
index 7c5187fd..ea017d58 100644
--- a/tests/unittests/distros/test__init__.py
+++ b/tests/unittests/distros/test__init__.py
@@ -221,6 +221,102 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
["pw", "usermod", "myuser", "-p", "01-Jan-1970"]
)
+ @mock.patch("cloudinit.distros.uses_systemd")
+ @mock.patch(
+ "cloudinit.distros.subp.which",
+ )
+ @mock.patch(
+ "cloudinit.distros.subp.subp",
+ )
+ def test_virtualization_detected(self, m_subp, m_which, m_uses_systemd):
+ m_uses_systemd.return_value = True
+ m_which.return_value = "/usr/bin/systemd-detect-virt"
+ m_subp.return_value = ("kvm", None)
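+        # systemd-detect-virt reporting "kvm" marks the host as virtual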
+
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.assertTrue(d.is_virtual)
+
+ @mock.patch("cloudinit.distros.uses_systemd")
+ @mock.patch(
+ "cloudinit.distros.subp.subp",
+ )
+ def test_virtualization_not_detected(self, m_subp, m_uses_systemd):
+ m_uses_systemd.return_value = True
+ m_subp.return_value = ("none", None)
+
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.assertFalse(d.is_virtual)
+
+ @mock.patch("cloudinit.distros.uses_systemd")
+ def test_virtualization_unknown(self, m_uses_systemd):
+ m_uses_systemd.return_value = True
+
+ from cloudinit.subp import ProcessExecutionError
+
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ with mock.patch(
+ "cloudinit.distros.subp.which",
+ return_value=None,
+ ):
+ self.assertIsNone(
+ d.is_virtual,
+ "Reflect unknown state when detection"
+ " binary cannot be found",
+ )
+
+ with mock.patch(
+ "cloudinit.distros.subp.subp",
+ side_effect=ProcessExecutionError(),
+ ):
+ self.assertIsNone(
+ d.is_virtual, "Reflect unknown state on ProcessExecutionError"
+ )
+
+ def test_virtualization_on_freebsd(self):
+        # This test function is a bit unusual: we first mock away the
+        # `ifconfig -a` subp call, then use side effects to supply the
+        # results of the two subp calls needed by is_container()/virtual(),
+        # which is_virtual depends on. The caches are cleared between
+        # assertions.
+
+ cls = distros.fetch("freebsd")
+ with mock.patch(
+ "cloudinit.distros.subp.subp", return_value=("", None)
+ ):
+ d = cls("freebsd", {}, None)
+ # This mock is called by `sysctl -n security.jail.jailed`
+ with mock.patch(
+ "cloudinit.distros.subp.subp",
+            side_effect=[("0\n", None), ("literally any truthy value", None)],
+ ):
+ self.assertFalse(d.is_container())
+ d.is_container.cache_clear()
+ self.assertTrue(d.is_container())
+ d.is_container.cache_clear()
+
+ # This mock is called by `sysctl -n kern.vm_guest`
+ with mock.patch(
+ "cloudinit.distros.subp.subp",
+ # fmt: off
+ side_effect=[
+ ("0\n", None), ("hv\n", None), # virtual
+ ("0\n", None), ("none\n", None), # physical
+ ("0\n", None), ("hv\n", None) # virtual
+ ],
+ # fmt: on
+ ):
+ self.assertEqual(d.virtual(), "microsoft")
+ d.is_container.cache_clear()
+ d.virtual.cache_clear()
+ self.assertEqual(d.virtual(), "none")
+ d.is_container.cache_clear()
+ d.virtual.cache_clear()
+
+ self.assertTrue(d.is_virtual)
+
class TestGetPackageMirrors:
def return_first(self, mlist):
diff --git a/tests/unittests/distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py
index 22be5098..70f2c7c6 100644
--- a/tests/unittests/distros/test_freebsd.py
+++ b/tests/unittests/distros/test_freebsd.py
@@ -2,9 +2,43 @@
import os
+from cloudinit.distros.freebsd import Distro, FreeBSDNetworking
from cloudinit.util import find_freebsd_part, get_path_dev_freebsd
+from tests.unittests.distros import _get_distro
from tests.unittests.helpers import CiTestCase, mock
+M_PATH = "cloudinit.distros.freebsd."
+
+
+class TestFreeBSD:
+ @mock.patch(M_PATH + "subp.subp")
+ def test_add_user(self, m_subp, mocker):
+ mocker.patch.object(Distro, "networking_cls", spec=FreeBSDNetworking)
+ distro = _get_distro("freebsd")
+ distro.add_user("me2", uid=1234, default=False)
+ assert [
+ mock.call(
+ [
+ "pw",
+ "useradd",
+ "-n",
+ "me2",
+ "-u",
+ "1234",
+ "-d/usr/home/me2",
+ "-m",
+ ],
+ logstring=[
+ "pw",
+ "useradd",
+ "-n",
+ "me2",
+ "-d/usr/home/me2",
+ "-m",
+ ],
+ )
+ ] == m_subp.call_args_list
+
class TestDeviceLookUp(CiTestCase):
@mock.patch("cloudinit.subp.subp")
diff --git a/tests/unittests/distros/test_ifconfig.py b/tests/unittests/distros/test_ifconfig.py
index ce595746..c5c1dee9 100644
--- a/tests/unittests/distros/test_ifconfig.py
+++ b/tests/unittests/distros/test_ifconfig.py
@@ -38,6 +38,22 @@ class TestIfconfigParserFreeBSD(TestCase):
ifs = Ifconfig().parse(self.ifs_txt)
assert "txcsum" in ifs["vtnet0"].options
+ def test_duplicate_mac(self):
+        """
+        Assert that duplicate MAC addresses are parsed as distinct
+        interfaces, by design rather than by accident.
+        """
+ self.ifs_txt = readResource(
+ "netinfo/freebsd-duplicate-macs-ifconfig-output"
+ )
+ ifc = Ifconfig()
+ ifc.parse(self.ifs_txt)
+ ifs_by_mac = ifc.ifs_by_mac()
+ assert len(ifs_by_mac["00:0d:3a:54:ad:1e"]) == 2
+ assert (
+ ifs_by_mac["00:0d:3a:54:ad:1e"][0].name
+ != ifs_by_mac["00:0d:3a:54:ad:1e"][1].name
+ )
+
class TestIfconfigParserOpenBSD(TestCase):
def setUp(self):
diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py
index f17a5d21..e9fb0591 100644
--- a/tests/unittests/distros/test_netconfig.py
+++ b/tests/unittests/distros/test_netconfig.py
@@ -7,7 +7,15 @@ from io import StringIO
from textwrap import dedent
from unittest import mock
-from cloudinit import distros, helpers, safeyaml, settings, subp, util
+from cloudinit import (
+ distros,
+ features,
+ helpers,
+ safeyaml,
+ settings,
+ subp,
+ util,
+)
from cloudinit.distros.parsers.sys_conf import SysConf
from cloudinit.net.activators import IfUpDownActivator
from tests.unittests.helpers import (
@@ -190,7 +198,9 @@ network:
eth0:
addresses:
- 192.168.1.5/24
- gateway4: 192.168.1.254
+ routes:
+ - to: default
+ via: 192.168.1.254
eth1:
dhcp4: true
"""
@@ -207,7 +217,9 @@ network:
eth0:
addresses:
- 2607:f0d0:1002:0011::2/64
- gateway6: 2607:f0d0:1002:0011::1
+ routes:
+ - to: default
+ via: 2607:f0d0:1002:0011::1
eth1:
dhcp4: true
"""
@@ -359,7 +371,7 @@ class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase):
}
rc_conf_expected = """\
defaultrouter=192.168.1.254
-ifconfig_eth0='192.168.1.5 netmask 255.255.255.0'
+ifconfig_eth0='inet 192.168.1.5 netmask 255.255.255.0'
ifconfig_eth1=DHCP
"""
@@ -374,6 +386,27 @@ ifconfig_eth1=DHCP
)
@mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_apply_network_config_freebsd_ipv6_standard(self, ifaces_mac):
+ ifaces_mac.return_value = {
+ "00:15:5d:4c:73:00": "eth0",
+ }
+ rc_conf_expected = """\
+ipv6_defaultrouter=2607:f0d0:1002:0011::1
+ifconfig_eth1=DHCP
+ifconfig_eth0_ipv6='inet6 2607:f0d0:1002:0011::2/64'
+"""
+
+ expected_cfgs = {
+ "/etc/rc.conf": rc_conf_expected,
+ "/etc/resolv.conf": "",
+ }
+ self._apply_and_verify_freebsd(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_apply_network_config_freebsd_ifrename(self, ifaces_mac):
ifaces_mac.return_value = {
"00:15:5d:4c:73:00": "vtnet0",
@@ -381,7 +414,7 @@ ifconfig_eth1=DHCP
rc_conf_expected = """\
ifconfig_vtnet0_name=eth0
defaultrouter=192.168.1.254
-ifconfig_eth0='192.168.1.5 netmask 255.255.255.0'
+ifconfig_eth0='inet 192.168.1.5 netmask 255.255.255.0'
ifconfig_eth1=DHCP
"""
@@ -452,7 +485,6 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
expected_cfgs = {
self.eni_path(): V1_NET_CFG_OUTPUT,
}
- # ub_distro.apply_network_config(V1_NET_CFG, False)
with mock.patch(
"cloudinit.net.activators.select_activator"
) as select_activator:
@@ -496,7 +528,6 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
expected_cfgs = {
self.eni_path(): V1_NET_CFG_OUTPUT,
}
- # ub_distro.apply_network_config(V1_NET_CFG, False)
self._apply_and_verify_eni(
self.distro.apply_network_config,
V1_NET_CFG,
@@ -522,7 +553,12 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
self.devlist = ["eth0", "lo"]
def _apply_and_verify_netplan(
- self, apply_fn, config, expected_cfgs=None, bringup=False
+ self,
+ apply_fn,
+ config,
+ expected_cfgs=None,
+ bringup=False,
+ previous_files=(),
):
if not expected_cfgs:
raise ValueError("expected_cfg must not be None")
@@ -534,65 +570,97 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
return_value=self.devlist,
):
with self.reRooted(tmpd) as tmpd:
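+                # Seed any pre-existing config files whose permissions the
+                # test expects to be honored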
+ for previous_path, content, mode in previous_files:
+ util.write_file(previous_path, content, mode=mode)
apply_fn(config, bringup)
results = dir2dict(tmpd)
- for cfgpath, expected in expected_cfgs.items():
+ for cfgpath, expected, mode in expected_cfgs:
print("----------")
print(expected)
print("^^^^ expected | rendered VVVVVVV")
print(results[cfgpath])
print("----------")
self.assertEqual(expected, results[cfgpath])
- self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+ self.assertEqual(mode, get_mode(cfgpath, tmpd))
def netplan_path(self):
return "/etc/netplan/50-cloud-init.yaml"
def test_apply_network_config_v1_to_netplan_ub(self):
- expected_cfgs = {
- self.netplan_path(): V1_TO_V2_NET_CFG_OUTPUT,
- }
+ expected_cfgs = (
+ (self.netplan_path(), V1_TO_V2_NET_CFG_OUTPUT, 0o600),
+ )
- # ub_distro.apply_network_config(V1_NET_CFG, False)
self._apply_and_verify_netplan(
self.distro.apply_network_config,
V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy(),
+ expected_cfgs=expected_cfgs,
)
def test_apply_network_config_v1_ipv6_to_netplan_ub(self):
- expected_cfgs = {
- self.netplan_path(): V1_TO_V2_NET_CFG_IPV6_OUTPUT,
- }
+ expected_cfgs = (
+ (self.netplan_path(), V1_TO_V2_NET_CFG_IPV6_OUTPUT, 0o600),
+ )
- # ub_distro.apply_network_config(V1_NET_CFG_IPV6, False)
self._apply_and_verify_netplan(
self.distro.apply_network_config,
V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy(),
+ expected_cfgs=expected_cfgs,
)
def test_apply_network_config_v2_passthrough_ub(self):
- expected_cfgs = {
- self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT,
- }
- # ub_distro.apply_network_config(V2_NET_CFG, False)
+ expected_cfgs = (
+ (self.netplan_path(), V2_TO_V2_NET_CFG_OUTPUT, 0o600),
+ )
self._apply_and_verify_netplan(
self.distro.apply_network_config,
V2_NET_CFG,
- expected_cfgs=expected_cfgs.copy(),
+ expected_cfgs=expected_cfgs,
+ )
+
+ def test_apply_network_config_v2_passthrough_retain_orig_perms(self):
+        """Custom permissions on an existing netplan file are kept when stricter."""
+ expected_cfgs = (
+ (self.netplan_path(), V2_TO_V2_NET_CFG_OUTPUT, 0o640),
)
+ with mock.patch.object(
+ features, "NETPLAN_CONFIG_ROOT_READ_ONLY", False
+ ):
+            # When NETPLAN_CONFIG_ROOT_READ_ONLY is False the default mode is
+            # 0o644; the pre-existing file's stricter 0o640 is kept instead.
+ self._apply_and_verify_netplan(
+ self.distro.apply_network_config,
+ V2_NET_CFG,
+ expected_cfgs=expected_cfgs,
+ previous_files=(
+ ("/etc/netplan/50-cloud-init.yaml", "a", 0o640),
+ ),
+ )
+
+ def test_apply_network_config_v2_passthrough_ub_old_behavior(self):
+ """Kinetic and earlier have 50-cloud-init.yaml world-readable"""
+ expected_cfgs = (
+ (self.netplan_path(), V2_TO_V2_NET_CFG_OUTPUT, 0o644),
+ )
+ with mock.patch.object(
+ features, "NETPLAN_CONFIG_ROOT_READ_ONLY", False
+ ):
+ self._apply_and_verify_netplan(
+ self.distro.apply_network_config,
+ V2_NET_CFG,
+ expected_cfgs=expected_cfgs,
+ )
def test_apply_network_config_v2_full_passthrough_ub(self):
- expected_cfgs = {
- self.netplan_path(): V2_PASSTHROUGH_NET_CFG_OUTPUT,
- }
- # ub_distro.apply_network_config(V2_PASSTHROUGH_NET_CFG, False)
+ expected_cfgs = (
+ (self.netplan_path(), V2_PASSTHROUGH_NET_CFG_OUTPUT, 0o600),
+ )
self._apply_and_verify_netplan(
self.distro.apply_network_config,
V2_PASSTHROUGH_NET_CFG,
- expected_cfgs=expected_cfgs.copy(),
+ expected_cfgs=expected_cfgs,
)
self.assertIn("Passthrough netplan v2 config", self.logs.getvalue())
self.assertIn(
@@ -912,6 +980,7 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
apply_fn(config, bringup)
results = dir2dict(tmpd)
+ mode = 0o600 if with_netplan else 0o644
for cfgpath, expected in expected_cfgs.items():
print("----------")
print(expected)
@@ -919,7 +988,7 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
print(results[cfgpath])
print("----------")
self.assertEqual(expected, results[cfgpath])
- self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+ self.assertEqual(mode, get_mode(cfgpath, tmpd))
def netctl_path(self, iface):
return "/etc/netctl/%s" % iface
@@ -957,7 +1026,6 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
),
}
- # ub_distro.apply_network_config(V1_NET_CFG, False)
self._apply_and_verify(
self.distro.apply_network_config,
V1_NET_CFG,
@@ -976,7 +1044,9 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
eth0:
addresses:
- 192.168.1.5/24
- gateway4: 192.168.1.254
+ routes:
+ - to: default
+ via: 192.168.1.254
eth1:
dhcp4: true
"""
@@ -1245,6 +1315,7 @@ class TestNetCfgDistroMariner(TestNetCfgDistroBase):
def get_mode(path, target=None):
+    # Mask off upper st_mode bits like S_IFREG; keep only the permission bits
return os.stat(subp.target_path(target, path)).st_mode & 0o777
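
The netplan permission tests above exercise a small mode-selection rule. A
minimal sketch of that rule, assuming the renderer keeps a stricter
pre-existing mode and otherwise uses the NETPLAN_CONFIG_ROOT_READ_ONLY
default (the helper name is illustrative, not the actual cloud-init API):

import os
import stat


def choose_netplan_mode(path, root_read_only):
    """Pick the mode for /etc/netplan/50-cloud-init.yaml (sketch only)."""
    default = 0o600 if root_read_only else 0o644
    if os.path.exists(path):
        current = stat.S_IMODE(os.stat(path).st_mode)
        # Keep the existing mode when it grants no bits beyond the default.
        if current | default == default:
            return current
    return default

With root_read_only=False and an existing file at 0o640 this returns 0o640,
matching test_apply_network_config_v2_passthrough_retain_orig_perms; with no
existing file it returns the 0o644 legacy default asserted by
test_apply_network_config_v2_passthrough_ub_old_behavior.
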
diff --git a/tests/unittests/distros/test_opensuse.py b/tests/unittests/distros/test_opensuse.py
index 6b8eea65..3261d629 100644
--- a/tests/unittests/distros/test_opensuse.py
+++ b/tests/unittests/distros/test_opensuse.py
@@ -1,10 +1,331 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from tests.unittests.distros import _get_distro
-from tests.unittests.helpers import CiTestCase
+from unittest import mock
+from cloudinit import distros
-class TestopenSUSE(CiTestCase):
- def test_get_distro(self):
- distro = _get_distro("opensuse")
- self.assertEqual(distro.osfamily, "suse")
+
+@mock.patch("cloudinit.distros.opensuse.subp.subp")
+class TestPackageCommands:
+ distro = distros.fetch("opensuse")("opensuse", {}, None)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "xfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / xfs rw,bar\n",
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.os.path.exists", return_value=False
+ )
+ def test_upgrade_not_btrfs(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("upgrade")
+ expected_cmd = ["zypper", "--non-interactive", "update"]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "xfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / xfs rw,bar\n",
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.os.path.exists", return_value=False
+ )
+ def test_upgrade_not_btrfs_pkg(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("upgrade", None, ["python36", "gzip"])
+ expected_cmd = [
+ "zypper",
+ "--non-interactive",
+ "update",
+ "python36",
+ "gzip",
+ ]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "xfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / xfs rw,bar\n",
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.os.path.exists", return_value=False
+ )
+ def test_update_not_btrfs(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("update")
+ expected_cmd = ["zypper", "--non-interactive", "update"]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "xfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / xfs rw,bar\n",
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.os.path.exists", return_value=False
+ )
+ def test_update_not_btrfs_pkg(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("update", None, ["python36", "gzip"])
+ expected_cmd = [
+ "zypper",
+ "--non-interactive",
+ "update",
+ "python36",
+ "gzip",
+ ]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "xfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / xfs rw,bar\n",
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.os.path.exists", return_value=False
+ )
+ def test_install_not_btrfs_pkg(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.install_packages(["python36", "gzip"])
+ expected_cmd = [
+ "zypper",
+ "--non-interactive",
+ "install",
+ "--auto-agree-with-licenses",
+ "python36",
+ "gzip",
+ ]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "btrfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / btrfs rw,bar\n",
+ )
+ @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True)
+ def test_upgrade_btrfs(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("upgrade")
+ expected_cmd = [
+ "transactional-update",
+ "--non-interactive",
+ "--drop-if-no-change",
+ "up",
+ ]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "btrfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / btrfs rw,bar\n",
+ )
+ @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True)
+ def test_upgrade_btrfs_pkg(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("upgrade", None, ["python36", "gzip"])
+ expected_cmd = [
+ "transactional-update",
+ "--non-interactive",
+ "--drop-if-no-change",
+ "pkg",
+ "update",
+ "python36",
+ "gzip",
+ ]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "btrfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / btrf rw,bar\n",
+ )
+ @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True)
+ def test_update_btrfs(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("update")
+ expected_cmd = [
+ "transactional-update",
+ "--non-interactive",
+ "--drop-if-no-change",
+ "up",
+ ]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "btrfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / btrfs rw,bar\n",
+ )
+ @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True)
+ def test_update_btrfs_pkg(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("update", None, ["python36", "gzip"])
+ expected_cmd = [
+ "transactional-update",
+ "--non-interactive",
+ "--drop-if-no-change",
+ "pkg",
+ "update",
+ "python36",
+ "gzip",
+ ]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "btrfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / btrfs rw,bar\n",
+ )
+ @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True)
+ def test_install_btrfs_pkg(self, m_tu_path, m_mounts, m_minfo, m_subp):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.install_packages(["python36", "gzip"])
+ expected_cmd = [
+ "transactional-update",
+ "--non-interactive",
+ "--drop-if-no-change",
+ "pkg",
+ "install",
+ "--auto-agree-with-licenses",
+ "python36",
+ "gzip",
+ ]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "btrfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / btrfs ro,bar\n",
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.os.path.exists", return_value=False
+ )
+ def test_upgrade_no_transact_up_ro_root(
+ self, m_tu_path, m_mounts, m_minfo, m_subp
+ ):
+ # Reset state
+ self.distro.update_method = None
+
+ result = self.distro.package_command("upgrade")
+ assert self.distro.read_only_root
+ assert result is None
+ assert not m_subp.called
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "btrfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / btrfs rw,bar\n",
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.os.path.exists", return_value=False
+ )
+ def test_upgrade_no_transact_up_rw_root_btrfs(
+ self, m_tu_path, m_mounts, m_minfo, m_subp
+ ):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("upgrade")
+ assert self.distro.update_method == "zypper"
+ assert self.distro.read_only_root is False
+ expected_cmd = ["zypper", "--non-interactive", "update"]
+ m_subp.assert_called_with(expected_cmd, capture=False)
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "xfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / xfs ro,bar\n",
+ )
+ @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True)
+ def test_upgrade_transact_up_ro_root(
+ self, m_tu_path, m_mounts, m_minfo, m_subp
+ ):
+ # Reset state
+ self.distro.update_method = None
+
+ result = self.distro.package_command("upgrade")
+ assert self.distro.update_method == "zypper"
+ assert self.distro.read_only_root
+ assert result is None
+ assert not m_subp.called
+
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.get_mount_info",
+ return_value=("/dev/sda1", "btrfs", "/"),
+ )
+ @mock.patch(
+ "cloudinit.distros.opensuse.util.load_file",
+ return_value="foo\n/dev/sda1 / btrfs ro,bar\n",
+ )
+ @mock.patch("cloudinit.distros.opensuse.os.path.exists", return_value=True)
+ def test_refresh_transact_up_ro_root_btrfs(
+ self, m_tu_path, m_mounts, m_minfo, m_subp
+ ):
+ # Reset state
+ self.distro.update_method = None
+
+ self.distro.package_command("refresh")
+ assert self.distro.update_method == "transactional"
+ assert self.distro.read_only_root
+ expected_cmd = ["zypper", "--non-interactive", "refresh"]
+ m_subp.assert_called_with(expected_cmd, capture=False)
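
The openSUSE package-command tests above encode a decision table: use
transactional-update only on a btrfs root where the tool is installed, refuse
write operations on a read-only root when only zypper is available, and run
"zypper refresh" for cache refreshes regardless. A minimal sketch of that
table with hypothetical helper names (the real logic lives in
cloudinit.distros.opensuse and may differ in detail):

def select_update_method(root_fs_type, transactional_update_exists):
    # transactional-update is only used on btrfs roots where the tool exists
    if root_fs_type == "btrfs" and transactional_update_exists:
        return "transactional"
    return "zypper"


def upgrade_command(update_method, read_only_root, pkglist=()):
    if update_method == "zypper":
        if read_only_root:
            return None  # plain zypper cannot modify a read-only root
        return ["zypper", "--non-interactive", "update", *pkglist]
    base = ["transactional-update", "--non-interactive", "--drop-if-no-change"]
    return base + (["pkg", "update", *pkglist] if pkglist else ["up"])
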
diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py
index e4f57b50..40340553 100644
--- a/tests/unittests/net/test_dhcp.py
+++ b/tests/unittests/net/test_dhcp.py
@@ -376,11 +376,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
self.logs.getvalue(),
)
+ @mock.patch("cloudinit.net.dhcp.os.remove")
@mock.patch("time.sleep", mock.MagicMock())
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.subp.subp")
@mock.patch("cloudinit.net.dhcp.util.wait_for_files", return_value=False)
- def test_dhcp_discovery_warns_invalid_pid(self, m_wait, m_subp, m_kill):
+ def test_dhcp_discovery_warns_invalid_pid(
+ self, m_wait, m_subp, m_kill, m_remove
+ ):
"""dhcp_discovery logs a warning when pidfile contains invalid content.
Lease processing still occurs and no proc kill is attempted.
@@ -422,12 +425,13 @@ class TestDHCPDiscoveryClean(CiTestCase):
)
m_kill.assert_not_called()
+ @mock.patch("cloudinit.net.dhcp.os.remove")
@mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.util.wait_for_files")
@mock.patch("cloudinit.net.dhcp.subp.subp")
def test_dhcp_discovery_waits_on_lease_and_pid(
- self, m_subp, m_wait, m_kill, m_getppid
+ self, m_subp, m_wait, m_kill, m_getppid, m_remove
):
"""dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
m_subp.return_value = ("", "")
@@ -446,11 +450,12 @@ class TestDHCPDiscoveryClean(CiTestCase):
)
m_kill.assert_not_called()
+ @mock.patch("cloudinit.net.dhcp.os.remove")
@mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.subp.subp")
@mock.patch("cloudinit.util.wait_for_files", return_value=False)
- def test_dhcp_discovery(self, m_wait, m_subp, m_kill, m_getppid):
+ def test_dhcp_discovery(self, m_wait, m_subp, m_kill, m_getppid, m_remove):
"""dhcp_discovery brings up the interface and runs dhclient.
It also returns the parsed dhcp.leases file.
@@ -508,11 +513,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
)
m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+ @mock.patch("cloudinit.net.dhcp.os.remove")
@mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
@mock.patch("cloudinit.net.dhcp.os.kill")
@mock.patch("cloudinit.net.dhcp.subp.subp")
@mock.patch("cloudinit.util.wait_for_files")
- def test_dhcp_output_error_stream(self, m_wait, m_subp, m_kill, m_getppid):
+ def test_dhcp_output_error_stream(
+ self, m_wait, m_subp, m_kill, m_getppid, m_remove
+ ):
""" "dhcp_log_func is called with the output and error streams of
dhclient when the callable is passed."""
dhclient_err = "FAKE DHCLIENT ERROR"
diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py
index 75d033dc..57a4436f 100644
--- a/tests/unittests/net/test_network_state.py
+++ b/tests/unittests/net/test_network_state.py
@@ -6,6 +6,8 @@ import pytest
from cloudinit import safeyaml
from cloudinit.net import network_state
+from cloudinit.net.netplan import Renderer as NetplanRenderer
+from cloudinit.net.renderers import NAME_TO_RENDERER
from tests.unittests.helpers import CiTestCase
netstate_path = "cloudinit.net.network_state"
@@ -99,15 +101,140 @@ class TestNetworkStateParseConfig(CiTestCase):
self.assertNotEqual(None, result)
-class TestNetworkStateParseConfigV2(CiTestCase):
- def test_version_2_ignores_renderer_key(self):
+@mock.patch("cloudinit.net.network_state.get_interfaces_by_mac")
+class TestNetworkStateParseConfigV2:
+ def test_version_2_ignores_renderer_key(self, m_get_interfaces_by_mac):
ncfg = {"version": 2, "renderer": "networkd", "ethernets": {}}
- with mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"):
- nsi = network_state.NetworkStateInterpreter(
- version=ncfg["version"], config=ncfg
+ nsi = network_state.NetworkStateInterpreter(
+ version=ncfg["version"], config=ncfg
+ )
+ nsi.parse_config(skip_broken=False)
+ assert ncfg == nsi.as_dict()["config"]
+
+ @pytest.mark.parametrize(
+ "cfg",
+ [
+ pytest.param(
+ """
+ version: 2
+ ethernets:
+ eth0:
+ addresses:
+ - 10.54.2.19/21
+ - 2a00:1730:fff9:100::52/128
+ {gateway4}
+ {gateway6}
+ match:
+ macaddress: 52:54:00:3f:fc:f7
+ nameservers:
+ addresses:
+ - 10.52.1.1
+ - 10.52.1.71
+ - 2001:4860:4860::8888
+ - 2001:4860:4860::8844
+ set-name: eth0
+ """,
+ id="ethernets",
+ ),
+ pytest.param(
+ """
+ version: 2
+ vlans:
+ encc000.2653:
+ id: 2653
+ link: "encc000"
+ addresses:
+ - 10.54.2.19/21
+ - 2a00:1730:fff9:100::52/128
+ {gateway4}
+ {gateway6}
+ nameservers:
+ addresses:
+ - 10.52.1.1
+ - 10.52.1.71
+ - 2001:4860:4860::8888
+ - 2001:4860:4860::8844
+ """,
+ id="vlan",
+ ),
+ pytest.param(
+ """
+ version: 2
+ bonds:
+ bond0:
+ addresses:
+ - 10.54.2.19/21
+ - 2a00:1730:fff9:100::52/128
+ {gateway4}
+ {gateway6}
+ interfaces:
+ - enp0s0
+ - enp0s1
+ mtu: 1334
+ parameters: {{}}
+ """,
+ id="bond",
+ ),
+ pytest.param(
+ """
+ version: 2
+ bridges:
+ bridge0:
+ addresses:
+ - 10.54.2.19/21
+ - 2a00:1730:fff9:100::52/128
+ {gateway4}
+ {gateway6}
+ interfaces:
+ - enp0s0
+ - enp0s1
+ parameters: {{}}
+ """,
+ id="bridge",
+ ),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "renderer_cls",
+ [
+ pytest.param(None, id="non-netplan"),
+ ]
+ + [
+ pytest.param(mod.Renderer, id=name)
+ for name, mod in NAME_TO_RENDERER.items()
+ ],
+ )
+ def test_v2_warns_deprecated_gateways(
+ self, m_get_interfaces_by_mac, renderer_cls, cfg: str, caplog
+ ):
+ """
+        Test that a v2 netconf using the deprecated `gateway4` or `gateway6`
+        keys emits a deprecation warning only on non-netplan targets.
+
+        On netplan targets the config is passed through verbatim, so the
+        warning is not needed.
+ """
+ ncfg = safeyaml.load(
+ cfg.format(
+ gateway4="gateway4: 10.54.0.1",
+ gateway6="gateway6: 2a00:1730:fff9:100::1",
)
- nsi.parse_config(skip_broken=False)
- self.assertEqual(ncfg, nsi.as_dict()["config"])
+ )
+ nsi = network_state.NetworkStateInterpreter(
+ version=ncfg["version"],
+ config=ncfg,
+ renderer=mock.MagicMock(spec=renderer_cls),
+ )
+ nsi.parse_config(skip_broken=False)
+ assert ncfg == nsi.as_dict()["config"]
+
+ if renderer_cls != NetplanRenderer:
+ count = 1 # Only one deprecation
+ else:
+ count = 0 # No deprecation as we passthrough
+ assert count == caplog.text.count(
+ "DEPRECATED: The use of `gateway4` and `gateway6` is"
+ )
class TestNetworkStateParseNameservers:
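
The deprecation the new test checks for points users from the v2 `gateway4`
and `gateway6` keys toward explicit default routes, as in the Arch netplan
expectation earlier in this patch. A minimal sketch of that translation on a
single interface dict (hypothetical helper, not part of cloud-init's API):

def gateways_to_routes(iface_cfg):
    routes = list(iface_cfg.get("routes", []))
    for key in ("gateway4", "gateway6"):
        if key in iface_cfg:
            routes.append({"to": "default", "via": iface_cfg.pop(key)})
    if routes:
        iface_cfg["routes"] = routes
    return iface_cfg

For example, {"gateway4": "10.54.0.1"} becomes
{"routes": [{"to": "default", "via": "10.54.0.1"}]}.
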
diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py
index 2958231b..bb781b98 100644
--- a/tests/unittests/net/test_networkd.py
+++ b/tests/unittests/net/test_networkd.py
@@ -195,10 +195,54 @@ Domains=rgrunbla.github.beta.tailscale.net
[Route]
Gateway=10.0.0.1
+
+[Route]
Gateway=2a01:4f8:10a:19d2::2
"""
+V2_CONFIG_MULTI_SUBNETS = """
+network:
+ version: 2
+ ethernets:
+ eth0:
+ addresses:
+ - 192.168.1.1/24
+ - fec0::1/64
+ gateway4: 192.168.254.254
+ gateway6: "fec0::ffff"
+ routes:
+ - to: 169.254.1.1/32
+ - to: "fe80::1/128"
+"""
+
+V2_CONFIG_MULTI_SUBNETS_RENDERED = """\
+[Address]
+Address=192.168.1.1/24
+
+[Address]
+Address=fec0::1/64
+
+[Match]
+Name=eth0
+
+[Network]
+DHCP=no
+
+[Route]
+Gateway=192.168.254.254
+
+[Route]
+Gateway=fec0::ffff
+
+[Route]
+Destination=169.254.1.1/32
+
+[Route]
+Destination=fe80::1/128
+
+"""
+
class TestNetworkdRenderState:
def _parse_network_state_from_config(self, config):
@@ -307,5 +351,16 @@ class TestNetworkdRenderState:
assert rendered_content["eth0"] == V1_CONFIG_MULTI_SUBNETS_RENDERED
+ def test_networkd_render_v2_multi_subnets(self):
+ """
+ Ensure a device with multiple subnets gets correctly rendered.
+
+ Per systemd-networkd docs, [Route] can only contain a single instance
+ of Gateway.
+ """
+ with mock.patch("cloudinit.net.get_interfaces_by_mac"):
+ ns = self._parse_network_state_from_config(V2_CONFIG_MULTI_SUBNETS)
+ renderer = networkd.Renderer()
+ rendered_content = renderer._render_content(ns)
-# vi: ts=4 expandtab
+ assert rendered_content["eth0"] == V2_CONFIG_MULTI_SUBNETS_RENDERED
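
V2_CONFIG_MULTI_SUBNETS_RENDERED above reflects the systemd-networkd rule
that a [Route] section holds a single Gateway= (or Destination=) entry, so
each gateway and each static route gets its own section. A minimal sketch of
that rendering step (illustrative helper, not the networkd Renderer itself):

def render_route_sections(gateways, destinations):
    sections = []
    for gw in gateways:
        sections.append("[Route]\nGateway=%s\n" % gw)
    for dest in destinations:
        sections.append("[Route]\nDestination=%s\n" % dest)
    return "\n".join(sections)

Calling render_route_sections(["192.168.254.254", "fec0::ffff"],
["169.254.1.1/32", "fe80::1/128"]) yields the four [Route] sections asserted
above.
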
diff --git a/tests/unittests/sources/azure/test_imds.py b/tests/unittests/sources/azure/test_imds.py
new file mode 100644
index 00000000..b5a72645
--- /dev/null
+++ b/tests/unittests/sources/azure/test_imds.py
@@ -0,0 +1,491 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import logging
+import math
+from unittest import mock
+
+import pytest
+import requests
+
+from cloudinit.sources.azure import imds
+from cloudinit.url_helper import UrlError
+
+MOCKPATH = "cloudinit.sources.azure.imds."
+
+
+@pytest.fixture
+def mock_readurl():
+ with mock.patch(MOCKPATH + "readurl", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_requests_session_request():
+ with mock.patch("requests.Session.request", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_url_helper_time_sleep():
+ with mock.patch("cloudinit.url_helper.time.sleep", autospec=True) as m:
+ yield m
+
+
+def fake_http_error_for_code(status_code: int):
+ response_failure = requests.Response()
+ response_failure.status_code = status_code
+ return requests.exceptions.HTTPError(
+ "fake error",
+ response=response_failure,
+ )
+
+
+class TestFetchMetadataWithApiFallback:
+ default_url = (
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true"
+ )
+ fallback_url = (
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01"
+ )
+ headers = {"Metadata": "true"}
+ retries = 10
+ timeout = 2
+
+ def test_basic(
+ self,
+ caplog,
+ mock_readurl,
+ ):
+ fake_md = {"foo": {"bar": []}}
+ mock_readurl.side_effect = [
+ mock.Mock(contents=json.dumps(fake_md).encode()),
+ ]
+
+ md = imds.fetch_metadata_with_api_fallback()
+
+ assert md == fake_md
+ assert mock_readurl.mock_calls == [
+ mock.call(
+ self.default_url,
+ timeout=self.timeout,
+ headers=self.headers,
+ retries=self.retries,
+ exception_cb=imds._readurl_exception_callback,
+ infinite=False,
+ log_req_resp=True,
+ ),
+ ]
+
+ warnings = [
+ x.message for x in caplog.records if x.levelno == logging.WARNING
+ ]
+ assert warnings == []
+
+ def test_basic_fallback(
+ self,
+ caplog,
+ mock_readurl,
+ ):
+ fake_md = {"foo": {"bar": []}}
+ mock_readurl.side_effect = [
+ UrlError("No IMDS version", code=400),
+ mock.Mock(contents=json.dumps(fake_md).encode()),
+ ]
+
+ md = imds.fetch_metadata_with_api_fallback()
+
+ assert md == fake_md
+ assert mock_readurl.mock_calls == [
+ mock.call(
+ self.default_url,
+ timeout=self.timeout,
+ headers=self.headers,
+ retries=self.retries,
+ exception_cb=imds._readurl_exception_callback,
+ infinite=False,
+ log_req_resp=True,
+ ),
+ mock.call(
+ self.fallback_url,
+ timeout=self.timeout,
+ headers=self.headers,
+ retries=self.retries,
+ exception_cb=imds._readurl_exception_callback,
+ infinite=False,
+ log_req_resp=True,
+ ),
+ ]
+
+ warnings = [
+ x.message for x in caplog.records if x.levelno == logging.WARNING
+ ]
+ assert warnings == [
+ "Failed to fetch metadata from IMDS: No IMDS version",
+ "Falling back to IMDS api-version: 2019-06-01",
+ ]
+
+ @pytest.mark.parametrize(
+ "error",
+ [
+ fake_http_error_for_code(404),
+ fake_http_error_for_code(410),
+ fake_http_error_for_code(429),
+ fake_http_error_for_code(500),
+ requests.ConnectionError("Fake connection error"),
+ requests.Timeout("Fake connection timeout"),
+ ],
+ )
+ def test_will_retry_errors(
+ self,
+ caplog,
+ mock_requests_session_request,
+ mock_url_helper_time_sleep,
+ error,
+ ):
+ fake_md = {"foo": {"bar": []}}
+ mock_requests_session_request.side_effect = [
+ error,
+ mock.Mock(content=json.dumps(fake_md)),
+ ]
+
+ md = imds.fetch_metadata_with_api_fallback()
+
+ assert md == fake_md
+ assert len(mock_requests_session_request.mock_calls) == 2
+ assert mock_url_helper_time_sleep.mock_calls == [mock.call(1)]
+
+ warnings = [
+ x.message for x in caplog.records if x.levelno == logging.WARNING
+ ]
+ assert warnings == []
+
+ def test_will_retry_errors_on_fallback(
+ self,
+ caplog,
+ mock_requests_session_request,
+ mock_url_helper_time_sleep,
+ ):
+ error = fake_http_error_for_code(400)
+ fake_md = {"foo": {"bar": []}}
+ mock_requests_session_request.side_effect = [
+ error,
+ fake_http_error_for_code(429),
+ mock.Mock(content=json.dumps(fake_md)),
+ ]
+
+ md = imds.fetch_metadata_with_api_fallback()
+
+ assert md == fake_md
+ assert len(mock_requests_session_request.mock_calls) == 3
+ assert mock_url_helper_time_sleep.mock_calls == [mock.call(1)]
+
+ warnings = [
+ x.message for x in caplog.records if x.levelno == logging.WARNING
+ ]
+ assert warnings == [
+ "Failed to fetch metadata from IMDS: fake error",
+ "Falling back to IMDS api-version: 2019-06-01",
+ ]
+
+ @pytest.mark.parametrize(
+ "error",
+ [
+ fake_http_error_for_code(404),
+ fake_http_error_for_code(410),
+ fake_http_error_for_code(429),
+ fake_http_error_for_code(500),
+ requests.ConnectionError("Fake connection error"),
+ requests.Timeout("Fake connection timeout"),
+ ],
+ )
+ def test_retry_until_failure(
+ self,
+ caplog,
+ mock_requests_session_request,
+ mock_url_helper_time_sleep,
+ error,
+ ):
+ mock_requests_session_request.side_effect = [error] * (11)
+
+ with pytest.raises(UrlError) as exc_info:
+ imds.fetch_metadata_with_api_fallback()
+
+ assert exc_info.value.cause == error
+ assert len(mock_requests_session_request.mock_calls) == (
+ self.retries + 1
+ )
+ assert (
+ mock_url_helper_time_sleep.mock_calls
+ == [mock.call(1)] * self.retries
+ )
+
+ warnings = [
+ x.message for x in caplog.records if x.levelno == logging.WARNING
+ ]
+ assert warnings == [f"Failed to fetch metadata from IMDS: {error!s}"]
+
+ @pytest.mark.parametrize(
+ "error",
+ [
+ fake_http_error_for_code(403),
+ fake_http_error_for_code(501),
+ ],
+ )
+ def test_will_not_retry_errors(
+ self,
+ caplog,
+ mock_requests_session_request,
+ mock_url_helper_time_sleep,
+ error,
+ ):
+ fake_md = {"foo": {"bar": []}}
+ mock_requests_session_request.side_effect = [
+ error,
+ mock.Mock(content=json.dumps(fake_md)),
+ ]
+
+ with pytest.raises(UrlError) as exc_info:
+ imds.fetch_metadata_with_api_fallback()
+
+ assert exc_info.value.cause == error
+ assert len(mock_requests_session_request.mock_calls) == 1
+ assert mock_url_helper_time_sleep.mock_calls == []
+
+ warnings = [
+ x.message for x in caplog.records if x.levelno == logging.WARNING
+ ]
+ assert warnings == [f"Failed to fetch metadata from IMDS: {error!s}"]
+
+    def test_non_json_response(
+ self,
+ caplog,
+ mock_readurl,
+ ):
+ mock_readurl.side_effect = [
+ mock.Mock(contents=b"bad data"),
+ ]
+
+ with pytest.raises(ValueError):
+ imds.fetch_metadata_with_api_fallback()
+
+ assert mock_readurl.mock_calls == [
+ mock.call(
+ self.default_url,
+ timeout=self.timeout,
+ headers=self.headers,
+ retries=self.retries,
+ exception_cb=imds._readurl_exception_callback,
+ infinite=False,
+ log_req_resp=True,
+ ),
+ ]
+
+ warnings = [
+ x.message for x in caplog.records if x.levelno == logging.WARNING
+ ]
+ assert warnings == [
+ (
+ "Failed to parse metadata from IMDS: "
+ "Expecting value: line 1 column 1 (char 0)"
+ )
+ ]
+
+
+class TestFetchReprovisionData:
+ url = (
+ "http://169.254.169.254/metadata/"
+ "reprovisiondata?api-version=2019-06-01"
+ )
+ headers = {"Metadata": "true"}
+ timeout = 2
+
+ def test_basic(
+ self,
+ caplog,
+ mock_readurl,
+ ):
+ content = b"ovf content"
+ mock_readurl.side_effect = [
+ mock.Mock(contents=content),
+ ]
+
+ ovf = imds.fetch_reprovision_data()
+
+ assert ovf == content
+ assert mock_readurl.mock_calls == [
+ mock.call(
+ self.url,
+ timeout=self.timeout,
+ headers=self.headers,
+ exception_cb=mock.ANY,
+ infinite=True,
+ log_req_resp=False,
+ ),
+ ]
+
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.sources.azure.imds",
+ logging.DEBUG,
+ "Polled IMDS 1 time(s)",
+ )
+ ]
+
+ @pytest.mark.parametrize(
+ "error",
+ [
+ fake_http_error_for_code(404),
+ fake_http_error_for_code(410),
+ ],
+ )
+ @pytest.mark.parametrize("failures", [1, 5, 100, 1000])
+ def test_will_retry_errors(
+ self,
+ caplog,
+ mock_requests_session_request,
+ mock_url_helper_time_sleep,
+ error,
+ failures,
+ ):
+ content = b"ovf content"
+ mock_requests_session_request.side_effect = [error] * failures + [
+ mock.Mock(content=content),
+ ]
+
+ ovf = imds.fetch_reprovision_data()
+
+ assert ovf == content
+ assert len(mock_requests_session_request.mock_calls) == failures + 1
+ assert (
+ mock_url_helper_time_sleep.mock_calls == [mock.call(1)] * failures
+ )
+
+ wrapped_error = UrlError(
+ error,
+ code=error.response.status_code,
+ headers=error.response.headers,
+ url=self.url,
+ )
+ backoff_logs = [
+ (
+ "cloudinit.sources.azure.imds",
+ logging.INFO,
+ "Polling IMDS failed with exception: "
+ f"{wrapped_error!r} count: {i}",
+ )
+ for i in range(1, failures + 1)
+ if i == 1 or math.log2(i).is_integer()
+ ]
+ assert caplog.record_tuples == backoff_logs + [
+ (
+ "cloudinit.url_helper",
+ logging.DEBUG,
+ mock.ANY,
+ ),
+ (
+ "cloudinit.sources.azure.imds",
+ logging.DEBUG,
+ f"Polled IMDS {failures+1} time(s)",
+ ),
+ ]
+
+ @pytest.mark.parametrize(
+ "error",
+ [
+ fake_http_error_for_code(404),
+ fake_http_error_for_code(410),
+ ],
+ )
+ @pytest.mark.parametrize("failures", [1, 5, 100, 1000])
+ @pytest.mark.parametrize(
+ "terminal_error",
+ [
+ requests.ConnectionError("Fake connection error"),
+ requests.Timeout("Fake connection timeout"),
+ ],
+ )
+ def test_retry_until_failure(
+ self,
+ caplog,
+ mock_requests_session_request,
+ mock_url_helper_time_sleep,
+ error,
+ failures,
+ terminal_error,
+ ):
+ mock_requests_session_request.side_effect = [error] * failures + [
+ terminal_error
+ ]
+
+ with pytest.raises(UrlError) as exc_info:
+ imds.fetch_reprovision_data()
+
+ assert exc_info.value.cause == terminal_error
+ assert len(mock_requests_session_request.mock_calls) == (failures + 1)
+ assert (
+ mock_url_helper_time_sleep.mock_calls == [mock.call(1)] * failures
+ )
+
+ wrapped_error = UrlError(
+ error,
+ code=error.response.status_code,
+ headers=error.response.headers,
+ url=self.url,
+ )
+
+ backoff_logs = [
+ (
+ "cloudinit.sources.azure.imds",
+ logging.INFO,
+ "Polling IMDS failed with exception: "
+ f"{wrapped_error!r} count: {i}",
+ )
+ for i in range(1, failures + 1)
+ if i == 1 or math.log2(i).is_integer()
+ ]
+ assert caplog.record_tuples == backoff_logs + [
+ (
+ "cloudinit.sources.azure.imds",
+ logging.INFO,
+ "Polling IMDS failed with exception: "
+ f"{exc_info.value!r} count: {failures+1}",
+ ),
+ ]
+
+ @pytest.mark.parametrize(
+ "error",
+ [
+ fake_http_error_for_code(403),
+ fake_http_error_for_code(501),
+ ],
+ )
+ def test_will_not_retry_errors(
+ self,
+ caplog,
+ mock_requests_session_request,
+ mock_url_helper_time_sleep,
+ error,
+ ):
+ fake_md = {"foo": {"bar": []}}
+ mock_requests_session_request.side_effect = [
+ error,
+ mock.Mock(content=json.dumps(fake_md)),
+ ]
+
+ with pytest.raises(UrlError) as exc_info:
+ imds.fetch_reprovision_data()
+
+ assert exc_info.value.cause == error
+ assert len(mock_requests_session_request.mock_calls) == 1
+ assert mock_url_helper_time_sleep.mock_calls == []
+
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.sources.azure.imds",
+ logging.INFO,
+ "Polling IMDS failed with exception: "
+ f"{exc_info.value!r} count: 1",
+ ),
+ ]
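
TestFetchMetadataWithApiFallback above pins down the fallback behaviour:
query the extended 2021-08-01 API first and, if that raises a UrlError, retry
with the baseline 2019-06-01 API. A minimal sketch of the pattern, assuming
readurl/UrlError from cloudinit.url_helper (this is not the actual
cloudinit.sources.azure.imds implementation, which also filters which errors
are retried):

import json

from cloudinit.url_helper import UrlError, readurl

DEFAULT_URL = (
    "http://169.254.169.254/metadata/instance"
    "?api-version=2021-08-01&extended=true"
)
FALLBACK_URL = (
    "http://169.254.169.254/metadata/instance?api-version=2019-06-01"
)


def fetch_with_fallback(retries=10, timeout=2):
    headers = {"Metadata": "true"}
    try:
        response = readurl(
            DEFAULT_URL, headers=headers, retries=retries, timeout=timeout
        )
    except UrlError:
        # Older platforms: fall back to the earliest supported api-version.
        response = readurl(
            FALLBACK_URL, headers=headers, retries=retries, timeout=timeout
        )
    return json.loads(response.contents)
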
diff --git a/tests/unittests/sources/helpers/test_cloudsigma.py b/tests/unittests/sources/helpers/test_cloudsigma.py
deleted file mode 100644
index 3c687388..00000000
--- a/tests/unittests/sources/helpers/test_cloudsigma.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.sources.helpers.cloudsigma import Cepko
-from tests.unittests import helpers as test_helpers
-
-SERVER_CONTEXT = {
- "cpu": 1000,
- "cpus_instead_of_cores": False,
- "global_context": {"some_global_key": "some_global_val"},
- "mem": 1073741824,
- "meta": {"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"},
- "name": "test_server",
- "requirements": [],
- "smp": 1,
- "tags": ["much server", "very performance"],
- "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e889",
- "vnc_password": "9e84d6cb49e46379",
-}
-
-
-class CepkoMock(Cepko):
- def all(self):
- return SERVER_CONTEXT
-
- def get(self, key="", request_pattern=None):
- return SERVER_CONTEXT["tags"]
-
-
-# 2015-01-22 BAW: This test is completely useless because it only ever tests
-# the CepkoMock object. Even in its original form, I don't think it ever
-# touched the underlying Cepko class methods.
-class CepkoResultTests(test_helpers.TestCase):
- def setUp(self):
- self.c = Cepko()
- raise test_helpers.SkipTest("This test is completely useless")
-
- def test_getitem(self):
- result = self.c.all()
- self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result["uuid"])
- self.assertEqual([], result["requirements"])
- self.assertEqual("much server", result["tags"][0])
- self.assertEqual(1, result["smp"])
-
- def test_len(self):
- self.assertEqual(len(SERVER_CONTEXT), len(self.c.all()))
-
- def test_contains(self):
- result = self.c.all()
- self.assertTrue("uuid" in result)
- self.assertFalse("uid" in result)
- self.assertTrue("meta" in result)
- self.assertFalse("ssh_public_key" in result)
-
- def test_iter(self):
- self.assertEqual(
- sorted(SERVER_CONTEXT.keys()),
- sorted([key for key in self.c.all()]),
- )
-
- def test_with_list_as_result(self):
- result = self.c.get("tags")
- self.assertEqual("much server", result[0])
- self.assertTrue("very performance" in result)
- self.assertEqual(2, len(result))
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py
index fe4e54b5..f95923a4 100644
--- a/tests/unittests/sources/test_aliyun.py
+++ b/tests/unittests/sources/test_aliyun.py
@@ -98,6 +98,15 @@ class TestAliYunDatasource(test_helpers.ResponsesTestCase):
"instance-identity",
)
+ @property
+ def token_url(self):
+ return os.path.join(
+ self.metadata_address,
+ "latest",
+ "api",
+ "token",
+ )
+
def register_mock_metaserver(self, base_url, data):
def register_helper(register, base_url, body):
if isinstance(body, str):
@@ -127,6 +136,7 @@ class TestAliYunDatasource(test_helpers.ResponsesTestCase):
self.register_mock_metaserver(self.metadata_url, self.default_metadata)
self.register_mock_metaserver(self.userdata_url, self.default_userdata)
self.register_mock_metaserver(self.identity_url, self.default_identity)
+ self.responses.add(responses.PUT, self.token_url, "API-TOKEN")
def _test_get_data(self):
self.assertEqual(self.ds.metadata, self.default_metadata)
@@ -151,8 +161,9 @@ class TestAliYunDatasource(test_helpers.ResponsesTestCase):
self.default_metadata["hostname"], self.ds.get_hostname().hostname
)
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable")
@mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
- def test_with_mock_server(self, m_is_aliyun):
+ def test_with_mock_server(self, m_is_aliyun, m_resolv):
m_is_aliyun.return_value = True
self.regist_default_server()
ret = self.ds.get_data()
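
The new token_url property and the responses.PUT registration above let the
mocked metadata server satisfy the datasource's token handshake. A minimal
standalone sketch of stubbing such a handshake with the responses library
(URLs and values here are illustrative, not taken from the test fixtures):

import responses


@responses.activate
def demo_token_stub():
    responses.add(
        responses.PUT,
        "http://100.100.100.200/latest/api/token",
        body="API-TOKEN",
    )
    responses.add(
        responses.GET,
        "http://100.100.100.200/2016-01-01/meta-data/hostname",
        body="my-host",
    )
    # Code under test would PUT for a token, then GET metadata using it.
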
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index 24fa061c..b5fe2672 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -3,7 +3,6 @@
import copy
import crypt
import json
-import logging
import os
import stat
import xml.etree.ElementTree as ET
@@ -11,13 +10,13 @@ from pathlib import Path
import pytest
import requests
-import responses
from cloudinit import distros, helpers, subp, url_helper
from cloudinit.net import dhcp
from cloudinit.sources import UNSET
from cloudinit.sources import DataSourceAzure as dsaz
from cloudinit.sources import InvalidMetaDataException
+from cloudinit.sources.azure import imds
from cloudinit.sources.helpers import netlink
from cloudinit.util import (
MountFailedError,
@@ -27,11 +26,9 @@ from cloudinit.util import (
load_json,
write_file,
)
-from cloudinit.version import version_string as vs
from tests.unittests.helpers import (
CiTestCase,
ExitStack,
- ResponsesTestCase,
mock,
populate_dir,
resourceLocation,
@@ -93,16 +90,6 @@ def mock_chassis_asset_tag():
@pytest.fixture
-def mock_device_driver():
- with mock.patch(
- MOCKPATH + "device_driver",
- autospec=True,
- return_value=None,
- ) as m:
- yield m
-
-
-@pytest.fixture
def mock_generate_fallback_config():
with mock.patch(
MOCKPATH + "net.generate_fallback_config",
@@ -173,9 +160,19 @@ def mock_net_dhcp_EphemeralIPv4Network():
yield m
-@pytest.fixture
+@pytest.fixture(autouse=True)
def mock_get_interfaces():
- with mock.patch(MOCKPATH + "net.get_interfaces", return_value=[]) as m:
+ with mock.patch(
+ MOCKPATH + "net.get_interfaces",
+ return_value=[
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("enP3", "00:11:22:33:44:02", "unknown_accel", "0x3"),
+ ("eth0", "00:11:22:33:44:00", "hv_netvsc", "0x3"),
+ ("eth2", "00:11:22:33:44:01", "unknown", "0x3"),
+ ("eth3", "00:11:22:33:44:02", "unknown_with_unknown_vf", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ],
+ ) as m:
yield m
@@ -205,7 +202,7 @@ def mock_os_path_isfile():
@pytest.fixture
def mock_readurl():
- with mock.patch(MOCKPATH + "readurl", autospec=True) as m:
+ with mock.patch(MOCKPATH + "imds.readurl", autospec=True) as m:
yield m
@@ -216,12 +213,6 @@ def mock_report_diagnostic_event():
@pytest.fixture
-def mock_requests_session_request():
- with mock.patch("requests.Session.request", autospec=True) as m:
- yield m
-
-
-@pytest.fixture
def mock_sleep():
with mock.patch(
MOCKPATH + "sleep",
@@ -237,12 +228,6 @@ def mock_subp_subp():
@pytest.fixture
-def mock_url_helper_time_sleep():
- with mock.patch("cloudinit.url_helper.time.sleep", autospec=True) as m:
- yield m
-
-
-@pytest.fixture
def mock_util_ensure_dir():
with mock.patch(
MOCKPATH + "util.ensure_dir",
@@ -507,6 +492,116 @@ class TestGenerateNetworkConfig:
},
),
(
+ "hv_netvsc driver",
+ {
+ "interface": [
+ {
+ "macAddress": "001122334400",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [
+ {"prefix": "24", "address": "10.0.0.0"}
+ ],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+ },
+ {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {
+ "macaddress": "00:11:22:33:44:00",
+ "driver": "hv_netvsc",
+ },
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ },
+ ),
+ (
+ "unknown",
+ {
+ "interface": [
+ {
+ "macAddress": "001122334401",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [
+ {"prefix": "24", "address": "10.0.0.0"}
+ ],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+ },
+ {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {
+ "macaddress": "00:11:22:33:44:01",
+ "driver": "unknown",
+ },
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ },
+ ),
+ (
+ "unknown with unknown matching VF",
+ {
+ "interface": [
+ {
+ "macAddress": "001122334402",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [
+ {"prefix": "24", "address": "10.0.0.0"}
+ ],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+ },
+ {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {
+ "macaddress": "00:11:22:33:44:02",
+ },
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ },
+ ),
+ (
"multiple interfaces with increasing route metric",
{
"interface": [
@@ -648,7 +743,7 @@ class TestGenerateNetworkConfig:
],
)
def test_parsing_scenarios(
- self, label, mock_device_driver, metadata, expected
+ self, label, mock_get_interfaces, metadata, expected
):
assert (
dsaz.generate_network_config_from_instance_network_metadata(
@@ -657,27 +752,6 @@ class TestGenerateNetworkConfig:
== expected
)
- def test_match_hv_netvsc(self, mock_device_driver):
- mock_device_driver.return_value = "hv_netvsc"
-
- assert dsaz.generate_network_config_from_instance_network_metadata(
- NETWORK_METADATA["network"]
- ) == {
- "ethernets": {
- "eth0": {
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 100},
- "dhcp6": False,
- "match": {
- "macaddress": "00:0d:3a:04:75:98",
- "driver": "hv_netvsc",
- },
- "set-name": "eth0",
- }
- },
- "version": 2,
- }
-
class TestNetworkConfig:
fallback_config = {
@@ -693,7 +767,9 @@ class TestNetworkConfig:
],
}
- def test_single_ipv4_nic_configuration(self, azure_ds, mock_device_driver):
+ def test_single_ipv4_nic_configuration(
+ self, azure_ds, mock_get_interfaces
+ ):
"""Network config emits dhcp on single nic with ipv4"""
expected = {
"ethernets": {
@@ -712,7 +788,7 @@ class TestNetworkConfig:
assert azure_ds.network_config == expected
def test_uses_fallback_cfg_when_apply_network_config_is_false(
- self, azure_ds, mock_device_driver, mock_generate_fallback_config
+ self, azure_ds, mock_generate_fallback_config
):
azure_ds.ds_cfg["apply_network_config"] = False
azure_ds._metadata_imds = NETWORK_METADATA
@@ -721,7 +797,7 @@ class TestNetworkConfig:
assert azure_ds.network_config == self.fallback_config
def test_uses_fallback_cfg_when_imds_metadata_unset(
- self, azure_ds, mock_device_driver, mock_generate_fallback_config
+ self, azure_ds, mock_generate_fallback_config
):
azure_ds._metadata_imds = UNSET
mock_generate_fallback_config.return_value = self.fallback_config
@@ -729,7 +805,7 @@ class TestNetworkConfig:
assert azure_ds.network_config == self.fallback_config
def test_uses_fallback_cfg_when_no_network_metadata(
- self, azure_ds, mock_device_driver, mock_generate_fallback_config
+ self, azure_ds, mock_generate_fallback_config
):
"""Network config generates fallback network config when the
IMDS instance metadata is corrupted/invalid, such as when
@@ -745,7 +821,7 @@ class TestNetworkConfig:
assert azure_ds.network_config == self.fallback_config
def test_uses_fallback_cfg_when_no_interface_metadata(
- self, azure_ds, mock_device_driver, mock_generate_fallback_config
+ self, azure_ds, mock_generate_fallback_config
):
"""Network config generates fallback network config when the
IMDS instance metadata is corrupted/invalid, such as when
@@ -761,176 +837,6 @@ class TestNetworkConfig:
assert azure_ds.network_config == self.fallback_config
-class TestGetMetadataFromIMDS(ResponsesTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestGetMetadataFromIMDS, self).setUp()
- self.network_md_url = "{}/instance?api-version=2019-06-01".format(
- dsaz.IMDS_URL
- )
-
- @mock.patch(MOCKPATH + "readurl", autospec=True)
- def test_get_metadata_uses_instance_url(self, m_readurl):
- """Make sure readurl is called with the correct url when accessing
- metadata"""
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
- )
-
- dsaz.get_metadata_from_imds(retries=3, md_type=dsaz.MetadataType.ALL)
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
- exception_cb=mock.ANY,
- headers=mock.ANY,
- retries=mock.ANY,
- timeout=mock.ANY,
- infinite=False,
- )
-
- @mock.patch(MOCKPATH + "readurl", autospec=True)
- def test_get_network_metadata_uses_network_url(self, m_readurl):
- """Make sure readurl is called with the correct url when accessing
- network metadata"""
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
- )
-
- dsaz.get_metadata_from_imds(
- retries=3, md_type=dsaz.MetadataType.NETWORK
- )
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance/network?api-version="
- "2019-06-01",
- exception_cb=mock.ANY,
- headers=mock.ANY,
- retries=mock.ANY,
- timeout=mock.ANY,
- infinite=False,
- )
-
- @mock.patch(MOCKPATH + "readurl", autospec=True)
- @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
- def test_get_default_metadata_uses_instance_url(self, m_dhcp, m_readurl):
- """Make sure readurl is called with the correct url when accessing
- metadata"""
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
- )
-
- dsaz.get_metadata_from_imds(retries=3)
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
- exception_cb=mock.ANY,
- headers=mock.ANY,
- retries=mock.ANY,
- timeout=mock.ANY,
- infinite=False,
- )
-
- @mock.patch(MOCKPATH + "readurl", autospec=True)
- def test_get_metadata_uses_extended_url(self, m_readurl):
- """Make sure readurl is called with the correct url when accessing
- metadata"""
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
- )
-
- dsaz.get_metadata_from_imds(
- retries=3,
- md_type=dsaz.MetadataType.ALL,
- api_version="2021-08-01",
- )
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version="
- "2021-08-01&extended=true",
- exception_cb=mock.ANY,
- headers=mock.ANY,
- retries=mock.ANY,
- timeout=mock.ANY,
- infinite=False,
- )
-
- @mock.patch(MOCKPATH + "readurl", autospec=True)
- def test_get_metadata_performs_dhcp_when_network_is_down(self, m_readurl):
- """Perform DHCP setup when nic is not up."""
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(NETWORK_METADATA).encode("utf-8")
- )
-
- self.assertEqual(
- NETWORK_METADATA, dsaz.get_metadata_from_imds(retries=2)
- )
-
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue(),
- )
-
- m_readurl.assert_called_with(
- self.network_md_url,
- exception_cb=mock.ANY,
- headers={"Metadata": "true"},
- retries=2,
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- infinite=False,
- )
-
- @mock.patch("cloudinit.url_helper.time.sleep")
- def test_get_metadata_from_imds_empty_when_no_imds_present(self, m_sleep):
- """Return empty dict when IMDS network metadata is absent."""
- # Workaround https://github.com/getsentry/responses/pull/166
- # url path can be reverted to "/instance?api-version=2019-12-01"
- response = requests.Response()
- response.status_code = 404
- self.responses.add(
- responses.GET,
- dsaz.IMDS_URL + "/instance",
- body=requests.HTTPError("...", response=response),
- status=404,
- )
-
- self.assertEqual(
- {},
- dsaz.get_metadata_from_imds(retries=2, api_version="2019-12-01"),
- )
-
- self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue(),
- )
-
- @mock.patch("requests.Session.request")
- @mock.patch("cloudinit.url_helper.time.sleep")
- def test_get_metadata_from_imds_retries_on_timeout(
- self, m_sleep, m_request
- ):
- """Retry IMDS network metadata on timeout errors."""
-
- self.attempt = 0
- m_request.side_effect = requests.Timeout("Fake Connection Timeout")
-
- def retry_callback(request, uri, headers):
- self.attempt += 1
- raise requests.Timeout("Fake connection timeout")
-
- self.responses.add(
- responses.GET,
- dsaz.IMDS_URL + "instance?api-version=2017-12-01",
- body=retry_callback,
- )
-
- self.assertEqual({}, dsaz.get_metadata_from_imds(retries=3))
-
- self.assertEqual([mock.call(1)] * 3, m_sleep.call_args_list)
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue(),
- )
-
-
class TestAzureDataSource(CiTestCase):
with_logs = True
@@ -962,10 +868,10 @@ class TestAzureDataSource(CiTestCase):
self.m_dhcp.return_value.lease = {}
self.m_dhcp.return_value.iface = "eth4"
- self.m_get_metadata_from_imds = self.patches.enter_context(
+ self.m_fetch = self.patches.enter_context(
mock.patch.object(
- dsaz,
- "get_metadata_from_imds",
+ dsaz.imds,
+ "fetch_metadata_with_api_fallback",
mock.MagicMock(return_value=NETWORK_METADATA),
)
)
@@ -1069,13 +975,6 @@ scbus-1 on xpt0 bus 0
self.m_get_metadata_from_fabric = mock.MagicMock(return_value=[])
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
- self.m_get_interfaces = mock.MagicMock(
- return_value=[
- ("dummy0", "9e:65:d6:19:19:01", None, None),
- ("eth0", "00:15:5d:69:63:ba", "hv_netvsc", "0x3"),
- ("lo", "00:00:00:00:00:00", None, None),
- ]
- )
self.m_list_possible_azure_ds = mock.MagicMock(
side_effect=_load_possible_azure_ds
)
@@ -1119,11 +1018,6 @@ scbus-1 on xpt0 bus 0
"get_interface_mac",
mock.MagicMock(return_value="00:15:5d:69:63:ba"),
),
- (
- dsaz.net,
- "get_interfaces",
- self.m_get_interfaces,
- ),
(dsaz.subp, "which", lambda x: True),
(
dsaz.dmi,
@@ -1238,7 +1132,7 @@ scbus-1 on xpt0 bus 0
self.assertEqual(1, m_crawl_metadata.call_count)
self.assertFalse(ret)
- def test_crawl_metadata_exception_should_report_failure_with_msg(self):
+ def test_crawl_metadata_exception_should_report_failure(self):
data = {}
dsrc = self._get_ds(data)
with mock.patch.object(
@@ -1249,9 +1143,7 @@ scbus-1 on xpt0 bus 0
m_crawl_metadata.side_effect = Exception
dsrc.get_data()
self.assertEqual(1, m_crawl_metadata.call_count)
- m_report_failure.assert_called_once_with(
- description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
- )
+ m_report_failure.assert_called_once_with()
def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
data = {}
@@ -1292,7 +1184,7 @@ scbus-1 on xpt0 bus 0
data, write_ovf_to_data_dir=True, write_ovf_to_seed_dir=False
)
- self.m_get_metadata_from_imds.return_value = {}
+ self.m_fetch.return_value = {}
with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb:
m_mount_cb.side_effect = [
MountFailedError("fail"),
@@ -1429,7 +1321,7 @@ scbus-1 on xpt0 bus 0
data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
dsrc.crawl_metadata()
- self.assertEqual(1, self.m_get_metadata_from_imds.call_count)
+ self.assertEqual(1, self.m_fetch.call_count)
@mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
@mock.patch(
@@ -1446,7 +1338,7 @@ scbus-1 on xpt0 bus 0
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
dsrc.crawl_metadata()
- self.assertEqual(2, self.m_get_metadata_from_imds.call_count)
+ self.assertEqual(2, self.m_fetch.call_count)
@mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
@mock.patch(
@@ -1497,9 +1389,11 @@ scbus-1 on xpt0 bus 0
"cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready",
return_value=True,
)
- @mock.patch("cloudinit.sources.DataSourceAzure.readurl")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.imds.fetch_reprovision_data"
+ )
def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
- self, m_readurl, m_report_ready, m_media_switch, m_write
+ self, m_fetch_reprovision_data, m_report_ready, m_media_switch, m_write
):
"""If reprovisioning, report ready using the obtained lease"""
ovfenv = construct_ovf_env(preprovisioned_vm=True)
@@ -1518,8 +1412,8 @@ scbus-1 on xpt0 bus 0
m_media_switch.return_value = None
reprovision_ovfenv = construct_ovf_env()
- m_readurl.return_value = url_helper.StringResponse(
- reprovision_ovfenv.encode("utf-8")
+ m_fetch_reprovision_data.return_value = reprovision_ovfenv.encode(
+ "utf-8"
)
dsrc.crawl_metadata()
@@ -1537,10 +1431,7 @@ scbus-1 on xpt0 bus 0
self.assertTrue(os.path.isdir(self.waagent_d))
self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- def test_network_config_set_from_imds(self, m_driver):
+ def test_network_config_set_from_imds(self):
"""Datasource.network_config returns IMDS network data."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
@@ -1563,12 +1454,7 @@ scbus-1 on xpt0 bus 0
dsrc.get_data()
self.assertEqual(expected_network_config, dsrc.network_config)
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- def test_network_config_set_from_imds_route_metric_for_secondary_nic(
- self, m_driver
- ):
+ def test_network_config_set_from_imds_route_metric_for_secondary_nic(self):
"""Datasource.network_config adds route-metric to secondary nics."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
@@ -1609,17 +1495,12 @@ scbus-1 on xpt0 bus 0
third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
imds_data["network"]["interface"].append(third_intf)
- self.m_get_metadata_from_imds.return_value = imds_data
+ self.m_fetch.return_value = imds_data
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertEqual(expected_network_config, dsrc.network_config)
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- def test_network_config_set_from_imds_for_secondary_nic_no_ip(
- self, m_driver
- ):
+ def test_network_config_set_from_imds_for_secondary_nic_no_ip(self):
"""If an IP address is empty then there should no config for it."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
@@ -1640,7 +1521,7 @@ scbus-1 on xpt0 bus 0
}
imds_data = copy.deepcopy(NETWORK_METADATA)
imds_data["network"]["interface"].append(SECONDARY_INTERFACE_NO_IP)
- self.m_get_metadata_from_imds.return_value = imds_data
+ self.m_fetch.return_value = imds_data
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertEqual(expected_network_config, dsrc.network_config)
@@ -1999,28 +1880,15 @@ scbus-1 on xpt0 bus 0
self.assertFalse(dsrc._report_failure())
self.assertEqual(2, self.m_report_failure_to_fabric.call_count)
- def test_dsaz_report_failure_description_msg(self):
+ def test_dsaz_report_failure(self):
dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
- # mock crawl metadata failure to cause report failure
m_crawl_metadata.side_effect = Exception
- test_msg = "Test report failure description message"
- self.assertTrue(dsrc._report_failure(description=test_msg))
- self.m_report_failure_to_fabric.assert_called_once_with(
- endpoint="168.63.129.16", description=test_msg
- )
-
- def test_dsaz_report_failure_no_description_msg(self):
- dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
-
- with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
- m_crawl_metadata.side_effect = Exception
-
- self.assertTrue(dsrc._report_failure()) # no description msg
+ self.assertTrue(dsrc._report_failure())
self.m_report_failure_to_fabric.assert_called_once_with(
- endpoint="168.63.129.16", description=None
+ endpoint="168.63.129.16"
)
def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
@@ -2038,7 +1906,7 @@ scbus-1 on xpt0 bus 0
# ensure called with cached ephemeral dhcp lease option 245
self.m_report_failure_to_fabric.assert_called_once_with(
- endpoint="test-ep", description=mock.ANY
+ endpoint="test-ep"
)
def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
@@ -2060,7 +1928,7 @@ scbus-1 on xpt0 bus 0
# ensure called with the newly discovered
# ephemeral dhcp lease option 245
self.m_report_failure_to_fabric.assert_called_once_with(
- endpoint="1.2.3.4", description=mock.ANY
+ endpoint="1.2.3.4"
)
def test_exception_fetching_fabric_data_doesnt_propagate(self):
@@ -2157,7 +2025,7 @@ scbus-1 on xpt0 bus 0
[mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list
)
- @mock.patch(MOCKPATH + "net.get_interfaces", autospec=True)
+ @mock.patch(MOCKPATH + "net.get_interfaces")
def test_blacklist_through_distro(self, m_net_get_interfaces):
"""Verify Azure DS updates blacklist drivers in the distro's
networking object."""
@@ -2175,7 +2043,7 @@ scbus-1 on xpt0 bus 0
)
distro.networking.get_interfaces_by_mac()
- self.m_get_interfaces.assert_called_with(
+ m_net_get_interfaces.assert_called_with(
blacklist_drivers=dsaz.BLACKLIST_DRIVERS
)
@@ -2210,13 +2078,12 @@ scbus-1 on xpt0 bus 0
@mock.patch(
"cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
)
- @mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_get_public_ssh_keys_with_no_openssh_format(
- self, m_get_metadata_from_imds, m_parse_certificates
+ self, m_parse_certificates
):
imds_data = copy.deepcopy(NETWORK_METADATA)
imds_data["compute"]["publicKeys"][0]["keyData"] = "no-openssh-format"
- m_get_metadata_from_imds.return_value = imds_data
+ self.m_fetch.return_value = imds_data
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
"ovfcontent": construct_ovf_env(),
@@ -2229,9 +2096,8 @@ scbus-1 on xpt0 bus 0
self.assertEqual(ssh_keys, [])
self.assertEqual(m_parse_certificates.call_count, 0)
- @mock.patch(MOCKPATH + "get_metadata_from_imds")
- def test_get_public_ssh_keys_without_imds(self, m_get_metadata_from_imds):
- m_get_metadata_from_imds.return_value = dict()
+ def test_get_public_ssh_keys_without_imds(self):
+ self.m_fetch.return_value = dict()
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
"ovfcontent": construct_ovf_env(),
@@ -2244,67 +2110,7 @@ scbus-1 on xpt0 bus 0
ssh_keys = dsrc.get_public_ssh_keys()
self.assertEqual(ssh_keys, ["key2"])
- @mock.patch(MOCKPATH + "get_metadata_from_imds")
- def test_imds_api_version_wanted_nonexistent(
- self, m_get_metadata_from_imds
- ):
- def get_metadata_from_imds_side_eff(*args, **kwargs):
- if kwargs["api_version"] == dsaz.IMDS_VER_WANT:
- raise url_helper.UrlError("No IMDS version", code=400)
- return NETWORK_METADATA
-
- m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
- sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- data = {
- "ovfcontent": construct_ovf_env(),
- "sys_cfg": sys_cfg,
- }
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertIsNotNone(dsrc.metadata)
-
- assert m_get_metadata_from_imds.mock_calls == [
- mock.call(
- retries=10,
- md_type=dsaz.MetadataType.ALL,
- api_version="2021-08-01",
- exc_cb=mock.ANY,
- infinite=False,
- ),
- mock.call(
- retries=10,
- md_type=dsaz.MetadataType.ALL,
- api_version="2019-06-01",
- exc_cb=mock.ANY,
- infinite=False,
- ),
- ]
-
- @mock.patch(
- MOCKPATH + "get_metadata_from_imds", return_value=NETWORK_METADATA
- )
- def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
- sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- data = {
- "ovfcontent": construct_ovf_env(),
- "sys_cfg": sys_cfg,
- }
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertIsNotNone(dsrc.metadata)
-
- assert m_get_metadata_from_imds.mock_calls == [
- mock.call(
- retries=10,
- md_type=dsaz.MetadataType.ALL,
- api_version="2021-08-01",
- exc_cb=mock.ANY,
- infinite=False,
- )
- ]
-
- @mock.patch(MOCKPATH + "get_metadata_from_imds")
- def test_hostname_from_imds(self, m_get_metadata_from_imds):
+ def test_hostname_from_imds(self):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
"ovfcontent": construct_ovf_env(),
@@ -2316,13 +2122,12 @@ scbus-1 on xpt0 bus 0
computerName="hostname1",
disablePasswordAuthentication="true",
)
- m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ self.m_fetch.return_value = imds_data_with_os_profile
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
- @mock.patch(MOCKPATH + "get_metadata_from_imds")
- def test_username_from_imds(self, m_get_metadata_from_imds):
+ def test_username_from_imds(self):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
"ovfcontent": construct_ovf_env(),
@@ -2334,15 +2139,14 @@ scbus-1 on xpt0 bus 0
computerName="hostname1",
disablePasswordAuthentication="true",
)
- m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ self.m_fetch.return_value = imds_data_with_os_profile
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertEqual(
dsrc.cfg["system_info"]["default_user"]["name"], "username1"
)
- @mock.patch(MOCKPATH + "get_metadata_from_imds")
- def test_disable_password_from_imds(self, m_get_metadata_from_imds):
+ def test_disable_password_from_imds(self):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
"ovfcontent": construct_ovf_env(),
@@ -2354,13 +2158,12 @@ scbus-1 on xpt0 bus 0
computerName="hostname1",
disablePasswordAuthentication="true",
)
- m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ self.m_fetch.return_value = imds_data_with_os_profile
dsrc = self._get_ds(data)
dsrc.get_data()
self.assertTrue(dsrc.metadata["disable_password"])
- @mock.patch(MOCKPATH + "get_metadata_from_imds")
- def test_userdata_from_imds(self, m_get_metadata_from_imds):
+ def test_userdata_from_imds(self):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
"ovfcontent": construct_ovf_env(),
@@ -2374,16 +2177,13 @@ scbus-1 on xpt0 bus 0
disablePasswordAuthentication="true",
)
imds_data["compute"]["userData"] = b64e(userdata)
- m_get_metadata_from_imds.return_value = imds_data
+ self.m_fetch.return_value = imds_data
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(dsrc.userdata_raw, userdata.encode("utf-8"))
- @mock.patch(MOCKPATH + "get_metadata_from_imds")
- def test_userdata_from_imds_with_customdata_from_OVF(
- self, m_get_metadata_from_imds
- ):
+ def test_userdata_from_imds_with_customdata_from_OVF(self):
userdataOVF = "userdataOVF"
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
@@ -2399,7 +2199,7 @@ scbus-1 on xpt0 bus 0
disablePasswordAuthentication="true",
)
imds_data["compute"]["userData"] = b64e(userdataImds)
- m_get_metadata_from_imds.return_value = imds_data
+ self.m_fetch.return_value = imds_data
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
@@ -2995,7 +2795,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
@mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
@mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up")
@mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
- @mock.patch(MOCKPATH + "DataSourceAzure.get_imds_data_with_api_fallback")
+ @mock.patch(MOCKPATH + "imds.fetch_metadata_with_api_fallback")
@mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
@mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
@mock.patch("os.path.isfile")
@@ -3175,7 +2975,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
@mock.patch(
"cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
)
-@mock.patch("requests.Session.request")
+@mock.patch(MOCKPATH + "imds.fetch_reprovision_data")
@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", return_value=True)
class TestPreprovisioningPollIMDS(CiTestCase):
def setUp(self):
@@ -3189,13 +2989,17 @@ class TestPreprovisioningPollIMDS(CiTestCase):
def test_poll_imds_re_dhcp_on_timeout(
self,
m_report_ready,
- m_request,
+ m_fetch_reprovisiondata,
m_media_switch,
m_dhcp,
m_net,
m_fallback,
):
"""The poll_imds will retry DHCP on IMDS timeout."""
+ m_fetch_reprovisiondata.side_effect = [
+ url_helper.UrlError(requests.Timeout("Fake connection timeout")),
+ b"ovf data",
+ ]
report_file = self.tmp_path("report_marker", self.tmp)
lease = {
"interface": "eth9",
@@ -3209,23 +3013,6 @@ class TestPreprovisioningPollIMDS(CiTestCase):
dhcp_ctx = mock.MagicMock(lease=lease)
dhcp_ctx.obtain_lease.return_value = lease
- self.tries = 0
-
- def fake_timeout_once(**kwargs):
- self.tries += 1
- if self.tries == 1:
- raise requests.Timeout("Fake connection timeout")
- elif self.tries in (2, 3):
- response = requests.Response()
- response.status_code = 404 if self.tries == 2 else 410
- raise requests.exceptions.HTTPError(
- "fake {}".format(response.status_code), response=response
- )
- # Third try should succeed and stop retries or redhcp
- return mock.MagicMock(status_code=200, text="good", content="good")
-
- m_request.side_effect = fake_timeout_once
-
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
@@ -3235,14 +3022,14 @@ class TestPreprovisioningPollIMDS(CiTestCase):
assert m_report_ready.mock_calls == [mock.call()]
self.assertEqual(3, m_dhcp.call_count, "Expected 3 DHCP calls")
- self.assertEqual(4, self.tries, "Expected 4 total reads from IMDS")
+ assert m_fetch_reprovisiondata.call_count == 2
@mock.patch("os.path.isfile")
def test_poll_imds_skips_dhcp_if_ctx_present(
self,
m_isfile,
report_ready_func,
- fake_resp,
+ m_fetch_reprovisiondata,
m_media_switch,
m_dhcp,
m_net,
@@ -3271,7 +3058,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_ephemeral_dhcpv4,
m_isfile,
report_ready_func,
- m_request,
+ m_fetch_reprovisiondata,
m_media_switch,
m_dhcp,
m_net,
@@ -3282,17 +3069,15 @@ class TestPreprovisioningPollIMDS(CiTestCase):
polling for reprovisiondata. Note that if this ctx is set when
_poll_imds is called, then it is not expected to be waiting for
media_disconnect_connect either."""
-
- tries = 0
-
- def fake_timeout_once(**kwargs):
- nonlocal tries
- tries += 1
- if tries == 1:
- raise requests.Timeout("Fake connection timeout")
- return mock.MagicMock(status_code=200, text="good", content="good")
-
- m_request.side_effect = fake_timeout_once
+ m_fetch_reprovisiondata.side_effect = [
+ url_helper.UrlError(
+ requests.ConnectionError(
+ "Failed to establish a new connection: "
+ "[Errno 101] Network is unreachable"
+ )
+ ),
+ b"ovf data",
+ ]
report_file = self.tmp_path("report_marker", self.tmp)
m_isfile.return_value = True
distro = mock.MagicMock()
@@ -3307,12 +3092,12 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
self.assertEqual(0, m_media_switch.call_count)
- self.assertEqual(2, m_request.call_count)
+ self.assertEqual(2, m_fetch_reprovisiondata.call_count)
def test_does_not_poll_imds_report_ready_when_marker_file_exists(
self,
m_report_ready,
- m_request,
+ m_fetch_reprovisiondata,
m_media_switch,
m_dhcp,
m_net,
@@ -3339,10 +3124,12 @@ class TestPreprovisioningPollIMDS(CiTestCase):
dsa._poll_imds()
self.assertEqual(m_report_ready.call_count, 0)
+ @mock.patch(MOCKPATH + "imds.fetch_metadata_with_api_fallback")
def test_poll_imds_report_ready_success_writes_marker_file(
self,
+ m_fetch,
m_report_ready,
- m_request,
+ m_fetch_reprovisiondata,
m_media_switch,
m_dhcp,
m_net,
@@ -3375,7 +3162,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
self,
m_report_ready,
- m_request,
+ m_fetch_reprovisiondata,
m_media_switch,
m_dhcp,
m_net,
@@ -3415,7 +3202,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
)
@mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network", autospec=True)
@mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
-@mock.patch("requests.Session.request")
+@mock.patch(
+ MOCKPATH + "imds.fetch_reprovision_data", side_effect=[b"ovf data"]
+)
class TestAzureDataSourcePreprovisioning(CiTestCase):
def setUp(self):
super(TestAzureDataSourcePreprovisioning, self).setUp()
@@ -3425,7 +3214,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
def test_poll_imds_returns_ovf_env(
- self, m_request, m_dhcp, m_net, m_media_switch
+ self, m_fetch_reprovisiondata, m_dhcp, m_net, m_media_switch
):
"""The _poll_imds method should return the ovf_env.xml."""
m_media_switch.return_value = None
@@ -3437,30 +3226,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
"subnet-mask": "255.255.255.0",
}
]
- url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
- host = "169.254.169.254"
- full_url = url.format(host)
- m_request.return_value = mock.MagicMock(
- status_code=200, text="ovf", content="ovf"
- )
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertTrue(len(dsa._poll_imds()) > 0)
- self.assertEqual(
- m_request.call_args_list,
- [
- mock.call(
- allow_redirects=True,
- headers={
- "Metadata": "true",
- "User-Agent": "Cloud-Init/%s" % vs(),
- },
- method="GET",
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url,
- stream=False,
- )
- ],
- )
self.assertEqual(m_dhcp.call_count, 2)
m_net.assert_any_call(
broadcast="192.168.2.255",
@@ -3473,7 +3240,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
self.assertEqual(m_net.call_count, 2)
def test__reprovision_calls__poll_imds(
- self, m_request, m_dhcp, m_net, m_media_switch
+ self, m_fetch_reprovisiondata, m_dhcp, m_net, m_media_switch
):
"""The _reprovision method should call poll IMDS."""
m_media_switch.return_value = None
@@ -3486,33 +3253,14 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
"unknown-245": "624c3620",
}
]
- url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
- host = "169.254.169.254"
- full_url = url.format(host)
hostname = "myhost"
username = "myuser"
content = construct_ovf_env(username=username, hostname=hostname)
- m_request.return_value = mock.MagicMock(
- status_code=200, text=content, content=content
- )
+ m_fetch_reprovisiondata.side_effect = [content]
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
md, _ud, cfg, _d = dsa._reprovision()
self.assertEqual(md["local-hostname"], hostname)
self.assertEqual(cfg["system_info"]["default_user"]["name"], username)
- self.assertIn(
- mock.call(
- allow_redirects=True,
- headers={
- "Metadata": "true",
- "User-Agent": "Cloud-Init/%s" % vs(),
- },
- method="GET",
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url,
- stream=False,
- ),
- m_request.call_args_list,
- )
self.assertEqual(m_dhcp.call_count, 2)
m_net.assert_any_call(
broadcast="192.168.2.255",
@@ -3857,186 +3605,6 @@ def fake_http_error_for_code(status_code: int):
)
-@pytest.mark.parametrize(
- "md_type,expected_url",
- [
- (
- dsaz.MetadataType.ALL,
- "http://169.254.169.254/metadata/instance?"
- "api-version=2021-08-01&extended=true",
- ),
- (
- dsaz.MetadataType.NETWORK,
- "http://169.254.169.254/metadata/instance/network?"
- "api-version=2021-08-01",
- ),
- (
- dsaz.MetadataType.REPROVISION_DATA,
- "http://169.254.169.254/metadata/reprovisiondata?"
- "api-version=2021-08-01",
- ),
- ],
-)
-class TestIMDS:
- def test_basic_scenarios(
- self, azure_ds, caplog, mock_readurl, md_type, expected_url
- ):
- fake_md = {"foo": {"bar": []}}
- mock_readurl.side_effect = [
- mock.MagicMock(contents=json.dumps(fake_md).encode()),
- ]
-
- md = azure_ds.get_imds_data_with_api_fallback(
- retries=5,
- md_type=md_type,
- )
-
- assert md == fake_md
- assert mock_readurl.mock_calls == [
- mock.call(
- expected_url,
- timeout=2,
- headers={"Metadata": "true"},
- retries=5,
- exception_cb=dsaz.imds_readurl_exception_callback,
- infinite=False,
- ),
- ]
-
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
- ]
- assert warnings == []
-
- @pytest.mark.parametrize(
- "error",
- [
- fake_http_error_for_code(404),
- fake_http_error_for_code(410),
- fake_http_error_for_code(429),
- fake_http_error_for_code(500),
- requests.Timeout("Fake connection timeout"),
- ],
- )
- def test_will_retry_errors(
- self,
- azure_ds,
- caplog,
- md_type,
- expected_url,
- mock_requests_session_request,
- mock_url_helper_time_sleep,
- error,
- ):
- fake_md = {"foo": {"bar": []}}
- mock_requests_session_request.side_effect = [
- error,
- mock.Mock(content=json.dumps(fake_md)),
- ]
-
- md = azure_ds.get_imds_data_with_api_fallback(
- retries=5,
- md_type=md_type,
- )
-
- assert md == fake_md
- assert len(mock_requests_session_request.mock_calls) == 2
- assert mock_url_helper_time_sleep.mock_calls == [mock.call(1)]
-
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
- ]
- assert warnings == []
-
- @pytest.mark.parametrize("retries", [0, 1, 5, 10])
- @pytest.mark.parametrize(
- "error",
- [
- fake_http_error_for_code(404),
- fake_http_error_for_code(410),
- fake_http_error_for_code(429),
- fake_http_error_for_code(500),
- requests.Timeout("Fake connection timeout"),
- ],
- )
- def test_retry_until_failure(
- self,
- azure_ds,
- caplog,
- md_type,
- expected_url,
- mock_requests_session_request,
- mock_url_helper_time_sleep,
- error,
- retries,
- ):
- mock_requests_session_request.side_effect = [error] * (retries + 1)
-
- assert (
- azure_ds.get_imds_data_with_api_fallback(
- retries=retries,
- md_type=md_type,
- )
- == {}
- )
-
- assert len(mock_requests_session_request.mock_calls) == (retries + 1)
- assert (
- mock_url_helper_time_sleep.mock_calls == [mock.call(1)] * retries
- )
-
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
- ]
- assert warnings == [
- "Ignoring IMDS instance metadata. "
- "Get metadata from IMDS failed: %s" % error
- ]
-
- @pytest.mark.parametrize(
- "error",
- [
- fake_http_error_for_code(403),
- fake_http_error_for_code(501),
- requests.ConnectionError("Fake Network Unreachable"),
- ],
- )
- def test_will_not_retry_errors(
- self,
- azure_ds,
- caplog,
- md_type,
- expected_url,
- mock_requests_session_request,
- mock_url_helper_time_sleep,
- error,
- ):
- fake_md = {"foo": {"bar": []}}
- mock_requests_session_request.side_effect = [
- error,
- mock.Mock(content=json.dumps(fake_md)),
- ]
-
- assert (
- azure_ds.get_imds_data_with_api_fallback(
- retries=5,
- md_type=md_type,
- )
- == {}
- )
-
- assert len(mock_requests_session_request.mock_calls) == 1
- assert mock_url_helper_time_sleep.mock_calls == []
-
- warnings = [
- x.message for x in caplog.records if x.levelno == logging.WARNING
- ]
- assert warnings == [
- "Ignoring IMDS instance metadata. "
- "Get metadata from IMDS failed: %s" % error
- ]
-
-
class TestInstanceId:
def test_metadata(self, azure_ds, mock_dmi_read_dmi_data):
azure_ds.metadata = {"instance-id": "test-id"}
@@ -4141,8 +3709,9 @@ class TestProvisioning:
timeout=2,
headers={"Metadata": "true"},
retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
+ exception_cb=imds._readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
),
]
@@ -4200,29 +3769,31 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
mock.call(
"http://169.254.169.254/metadata/reprovisiondata?"
"api-version=2019-06-01",
- timeout=2,
- headers={"Metadata": "true"},
exception_cb=mock.ANY,
- infinite=True,
+ headers={"Metadata": "true"},
log_req_resp=False,
+ infinite=True,
+ timeout=2,
),
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
]
@@ -4303,38 +3874,41 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
mock.call(
- "http://169.254.169.254/metadata/instance/network?"
- "api-version=2021-08-01",
- timeout=2,
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=0,
- exception_cb=mock.ANY,
- infinite=True,
+ infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
mock.call(
"http://169.254.169.254/metadata/reprovisiondata?"
"api-version=2019-06-01",
- timeout=2,
- headers={"Metadata": "true"},
exception_cb=mock.ANY,
- infinite=True,
+ headers={"Metadata": "true"},
log_req_resp=False,
+ infinite=True,
+ timeout=2,
),
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
]
@@ -4451,38 +4025,41 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
mock.call(
- "http://169.254.169.254/metadata/instance/network?"
- "api-version=2021-08-01",
- timeout=2,
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=0,
- exception_cb=mock.ANY,
- infinite=True,
+ infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
mock.call(
"http://169.254.169.254/metadata/reprovisiondata?"
"api-version=2019-06-01",
- timeout=2,
- headers={"Metadata": "true"},
exception_cb=mock.ANY,
+ headers={"Metadata": "true"},
infinite=True,
log_req_resp=False,
+ timeout=2,
),
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
]
@@ -4557,29 +4134,31 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
mock.call(
"http://169.254.169.254/metadata/reprovisiondata?"
"api-version=2019-06-01",
- timeout=2,
- headers={"Metadata": "true"},
exception_cb=mock.ANY,
+ headers={"Metadata": "true"},
infinite=True,
log_req_resp=False,
+ timeout=2,
),
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
),
]
@@ -4636,12 +4215,13 @@ class TestProvisioning:
mock.call(
"http://169.254.169.254/metadata/instance?"
"api-version=2021-08-01&extended=true",
- timeout=2,
+ exception_cb=imds._readurl_exception_callback,
headers={"Metadata": "true"},
- retries=10,
- exception_cb=dsaz.imds_readurl_exception_callback,
infinite=False,
- )
+ log_req_resp=True,
+ retries=10,
+ timeout=2,
+ ),
]
assert self.mock_subp_subp.mock_calls == []
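The test_azure.py hunks above drop the per-test @mock.patch(MOCKPATH + "get_metadata_from_imds") decorators in favour of a shared self.m_fetch mock and patch the new imds.fetch_metadata_with_api_fallback / imds.fetch_reprovision_data helpers directly. A minimal sketch of that shared-mock pattern, assuming a unittest-style base class (the class name and setUp wiring here are illustrative, not the actual test code):

# Illustrative sketch only; the real tests wire self.m_fetch differently.
from unittest import TestCase, mock

MOCKPATH = "cloudinit.sources.DataSourceAzure."


class AzureImdsTestBase(TestCase):
    def setUp(self):
        super().setUp()
        # One shared patch of the imds helper replaces the old per-test
        # @mock.patch(MOCKPATH + "get_metadata_from_imds") decorators;
        # each test only sets the return value it needs.
        patcher = mock.patch(
            MOCKPATH + "imds.fetch_metadata_with_api_fallback"
        )
        self.m_fetch = patcher.start()
        self.addCleanup(patcher.stop)

    def test_hostname_from_imds_sketch(self):
        self.m_fetch.return_value = {
            "compute": {"osProfile": {"computerName": "hostname1"}}
        }
        # ... build the datasource and call get_data() as in the real tests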
diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
index 0a41fedf..38a57b99 100644
--- a/tests/unittests/sources/test_azure_helper.py
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -75,6 +75,23 @@ HEALTH_REPORT_XML_TEMPLATE = """\
</Health>
"""
+
+def get_formatted_health_report_xml_bytes(
+ container_id: str,
+ incarnation: int,
+ instance_id: str,
+ health_status: str,
+ health_detail_subsection: str,
+) -> bytes:
+ return HEALTH_REPORT_XML_TEMPLATE.format(
+ container_id=container_id,
+ incarnation=incarnation,
+ instance_id=instance_id,
+ health_status=health_status,
+ health_detail_subsection=health_detail_subsection,
+ ).encode("utf-8")
+
+
HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent(
"""\
<Details>
@@ -626,14 +643,11 @@ class TestGoalStateHealthReporter(CiTestCase):
return element.text
return None
- def _get_formatted_health_report_xml_string(self, **kwargs):
- return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs)
-
def _get_formatted_health_detail_subsection_xml_string(self, **kwargs):
return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(**kwargs)
def _get_report_ready_health_document(self):
- return self._get_formatted_health_report_xml_string(
+ return get_formatted_health_report_xml_bytes(
incarnation=escape(str(self.default_parameters["incarnation"])),
container_id=escape(self.default_parameters["container_id"]),
instance_id=escape(self.default_parameters["instance_id"]),
@@ -651,7 +665,7 @@ class TestGoalStateHealthReporter(CiTestCase):
)
)
- return self._get_formatted_health_report_xml_string(
+ return get_formatted_health_report_xml_bytes(
incarnation=escape(str(self.default_parameters["incarnation"])),
container_id=escape(self.default_parameters["container_id"]),
instance_id=escape(self.default_parameters["instance_id"]),
@@ -887,7 +901,7 @@ class TestGoalStateHealthReporter(CiTestCase):
health_description=escape(health_description),
)
)
- health_document = self._get_formatted_health_report_xml_string(
+ health_document = get_formatted_health_report_xml_bytes(
incarnation=escape(incarnation),
container_id=escape(container_id),
instance_id=escape(instance_id),
@@ -1132,9 +1146,9 @@ class TestWALinuxAgentShim(CiTestCase):
posted_document = (
self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
)
- self.assertIn(self.test_incarnation, posted_document)
- self.assertIn(self.test_container_id, posted_document)
- self.assertIn(self.test_instance_id, posted_document)
+ self.assertIn(self.test_incarnation.encode("utf-8"), posted_document)
+ self.assertIn(self.test_container_id.encode("utf-8"), posted_document)
+ self.assertIn(self.test_instance_id.encode("utf-8"), posted_document)
def test_goal_state_values_used_for_report_failure(self):
shim = wa_shim(endpoint="test_endpoint")
@@ -1142,14 +1156,14 @@ class TestWALinuxAgentShim(CiTestCase):
posted_document = (
self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
)
- self.assertIn(self.test_incarnation, posted_document)
- self.assertIn(self.test_container_id, posted_document)
- self.assertIn(self.test_instance_id, posted_document)
+ self.assertIn(self.test_incarnation.encode("utf-8"), posted_document)
+ self.assertIn(self.test_container_id.encode("utf-8"), posted_document)
+ self.assertIn(self.test_instance_id.encode("utf-8"), posted_document)
def test_xml_elems_in_report_ready_post(self):
shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_fetch_data()
- health_document = HEALTH_REPORT_XML_TEMPLATE.format(
+ health_document = get_formatted_health_report_xml_bytes(
incarnation=escape(self.test_incarnation),
container_id=escape(self.test_container_id),
instance_id=escape(self.test_instance_id),
@@ -1164,7 +1178,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_xml_elems_in_report_failure_post(self):
shim = wa_shim(endpoint="test_endpoint")
shim.register_with_azure_and_report_failure(description="TestDesc")
- health_document = HEALTH_REPORT_XML_TEMPLATE.format(
+ health_document = get_formatted_health_report_xml_bytes(
incarnation=escape(self.test_incarnation),
container_id=escape(self.test_container_id),
instance_id=escape(self.test_instance_id),
@@ -1382,35 +1396,11 @@ class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
)
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
- def test_report_failure_to_fabric_with_desc_calls_shim_report_failure(
- self,
- ):
- azure_helper.report_failure_to_fabric(
- endpoint="test_endpoint", description="TestDesc"
- )
- self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
- description="TestDesc"
- )
-
- def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure(
+ def test_report_failure_to_fabric_calls_shim_report_failure(
self,
):
azure_helper.report_failure_to_fabric(endpoint="test_endpoint")
# default err message description should be shown to the user
- # if no description is passed in
- self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
- description=(
- azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
- )
- )
-
- def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure(
- self,
- ):
- azure_helper.report_failure_to_fabric(
- endpoint="test_endpoint", description=""
- )
- # default err message description should be shown to the user
# if an empty description is passed in
self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
description=(
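The test_azure_helper.py hunks replace the bound helper _get_formatted_health_report_xml_string with a module-level get_formatted_health_report_xml_bytes that returns UTF-8 bytes, which is why the assertions now encode the expected substrings. A small sketch of that round-trip, assuming a simplified placeholder template (the real HEALTH_REPORT_XML_TEMPLATE is defined earlier in the test module and has more fields):

from xml.sax.saxutils import escape

# Placeholder template, only detailed enough to show the bytes round-trip.
HEALTH_REPORT_XML_TEMPLATE = """\
<Health>
  <ContainerId>{container_id}</ContainerId>
  <Incarnation>{incarnation}</Incarnation>
  <InstanceId>{instance_id}</InstanceId>
  <State>{health_status}</State>
  {health_detail_subsection}
</Health>
"""


def get_formatted_health_report_xml_bytes(
    container_id: str,
    incarnation: int,
    instance_id: str,
    health_status: str,
    health_detail_subsection: str,
) -> bytes:
    return HEALTH_REPORT_XML_TEMPLATE.format(
        container_id=container_id,
        incarnation=incarnation,
        instance_id=instance_id,
        health_status=health_status,
        health_detail_subsection=health_detail_subsection,
    ).encode("utf-8")


posted_document = get_formatted_health_report_xml_bytes(
    container_id=escape("test-container-id"),
    incarnation=1,
    instance_id=escape("test-instance-id"),
    health_status="Ready",
    health_detail_subsection="",
)
# The document is bytes, so expected substrings must be encoded as well:
assert "test-container-id".encode("utf-8") in posted_document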
diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py
index 4c832da7..3fe525e3 100644
--- a/tests/unittests/sources/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -5,6 +5,7 @@ import json
import threading
from unittest import mock
+import pytest
import requests
import responses
@@ -223,6 +224,12 @@ TAGS_METADATA_2021_03_23: dict = {
}
+@pytest.fixture(autouse=True)
+def disable_is_resolvable():
+ with mock.patch("cloudinit.sources.DataSourceEc2.util.is_resolvable"):
+ yield
+
+
def _register_ssh_keys(rfunc, base_url, keys_data):
"""handle ssh key inconsistencies.
@@ -303,7 +310,7 @@ def register_mock_metaserver(base_url, data, responses_mock=None):
def myreg(*argc, **kwargs):
url, body = argc
- method = responses.PUT if ec2.API_TOKEN_ROUTE in url else responses.GET
+ method = responses.PUT if "latest/api/token" in url else responses.GET
status = kwargs.get("status", 200)
return responses_mock.add(method, url, body, status=status)
@@ -1180,6 +1187,16 @@ class TesIdentifyPlatform(test_helpers.CiTestCase):
return unspecial
@mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
+ def test_identify_aliyun(self, m_collect):
+ """aliyun should be identified if product name equals to
+ Alibaba Cloud ECS
+ """
+ m_collect.return_value = self.collmock(
+ product_name="Alibaba Cloud ECS"
+ )
+ self.assertEqual(ec2.CloudNames.ALIYUN, ec2.identify_platform())
+
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_zstack(self, m_collect):
"""zstack should be identified if chassis-asset-tag
ends in .zstack.io
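The new autouse fixture in test_ec2.py patches util.is_resolvable for every test in the module so no test performs real DNS lookups. A short sketch of the pattern, assuming pytest; yielding the mock object, as shown here, is an optional variation on the fixture above:

from unittest import mock

import pytest


@pytest.fixture(autouse=True)
def disable_is_resolvable():
    # autouse=True applies the patch to every test in the module, whether
    # or not a test requests the fixture by name.
    with mock.patch(
        "cloudinit.sources.DataSourceEc2.util.is_resolvable"
    ) as m_is_resolvable:
        yield m_is_resolvable


def test_example(disable_is_resolvable):
    # Requesting the fixture is optional (it runs either way), but yielding
    # the mock, as sketched here, lets a test inspect it.
    assert disable_is_resolvable.call_count == 0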
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
index a81c33a2..0447e02c 100644
--- a/tests/unittests/sources/test_init.py
+++ b/tests/unittests/sources/test_init.py
@@ -716,9 +716,13 @@ class TestDataSource(CiTestCase):
"cloudinit.sources.canonical_cloud_id", return_value="my-cloud"
):
datasource.get_data()
- self.assertEqual("my-cloud\n", util.load_file(cloud_id_link))
- # A symlink with the generic /run/cloud-init/cloud-id link is present
- self.assertTrue(util.is_link(cloud_id_link))
+ self.assertEqual("my-cloud\n", util.load_file(cloud_id_link))
+ # A symlink with the generic /run/cloud-init/cloud-id
+ # link is present
+ self.assertTrue(util.is_link(cloud_id_link))
+ datasource.persist_instance_data()
+ # cloud-id<cloud-type> not deleted: no cloud-id change
+ self.assertTrue(os.path.exists(cloud_id_file))
# When cloud-id changes, symlink and content change
with mock.patch(
"cloudinit.sources.canonical_cloud_id", return_value="my-cloud2"
diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py
index b02ed177..efc24883 100644
--- a/tests/unittests/sources/test_lxd.py
+++ b/tests/unittests/sources/test_lxd.py
@@ -440,18 +440,22 @@ class TestReadMetadata:
"[GET] [HTTP:200] http://lxd/1.0/config",
],
),
- ( # Assert 404 on devices
+ ( # Assert 404 on devices logs about skipping
True,
{
"http://lxd/1.0/meta-data": "local-hostname: md\n",
"http://lxd/1.0/config": "[]",
+ # No devices URL response, so 404 raised
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {},
+ "meta-data": "local-hostname: md\n",
},
- InvalidMetaDataException(
- "Invalid HTTP response [404] from http://lxd/1.0/devices"
- ),
[
"[GET] [HTTP:200] http://lxd/1.0/meta-data",
"[GET] [HTTP:200] http://lxd/1.0/config",
+ "Skipping http://lxd/1.0/devices on [HTTP:404]",
],
),
( # Assert non-JSON format from devices
@@ -693,5 +697,46 @@ class TestReadMetadata:
== m_session_get.call_args_list
)
+ @mock.patch.object(lxd.requests.Session, "get")
+ @mock.patch.object(lxd.time, "sleep")
+ def test_socket_retry(self, m_session_get, m_sleep):
+ """validate socket retry logic"""
+
+ def generate_return_codes():
+ """
+ [200]
+ [500, 200]
+ [500, 500, 200]
+ [500, 500, ..., 200]
+ """
+ five_hundreds = []
+
+ # generate a couple of longer ones to assert timeout condition
+ for _ in range(33):
+ five_hundreds.append(500)
+ yield [*five_hundreds, 200]
+
+ for return_codes in generate_return_codes():
+ m = mock.Mock(
+ get=mock.Mock(
+ side_effect=[
+ mock.MagicMock(
+ ok=mock.PropertyMock(return_value=True),
+ status_code=code,
+ text=mock.PropertyMock(
+ return_value="properly formatted http response"
+ ),
+ )
+ for code in return_codes
+ ]
+ )
+ )
+ resp = lxd._do_request(m, "http://agua/")
-# vi: ts=4 expandtab
+ # assert that 30 iterations or the first 200 code is the final
+ # attempt, whichever comes first
+ assert min(len(return_codes), 30) == m.get.call_count
+ if len(return_codes) < 31:
+ assert 200 == resp.status_code
+ else:
+ assert 500 == resp.status_code
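test_socket_retry exercises lxd._do_request against a mocked session: it must retry on HTTP 500, stop at the first 200, and give up after roughly 30 attempts, returning the last response either way. A hedged sketch of a retry loop with that behaviour (inferred from the assertions above, not the actual lxd._do_request implementation):

import time

import requests


def do_request_with_retry(
    session: requests.Session, url: str, attempts: int = 30
):
    # Sketch only: retry on non-200 responses, sleep between tries, stop
    # after ~30 attempts or at the first 200, and return the last response.
    response = None
    for _ in range(attempts):
        response = session.get(url)
        if response.status_code == 200:
            break
        time.sleep(0.1)  # the test patches lxd.time.sleep, so retries are cheap
    return response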
diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py
index af1c45b8..0fc332a9 100644
--- a/tests/unittests/sources/test_opennebula.py
+++ b/tests/unittests/sources/test_opennebula.py
@@ -121,7 +121,9 @@ class TestOpenNebulaDataSource(CiTestCase):
util.find_devs_with = lambda n: [] # type: ignore
populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
- ret = dsrc.get_data()
+ with mock.patch(DS_PATH + ".pwd.getpwnam") as getpwnam:
+ ret = dsrc.get_data()
+ self.assertEqual([mock.call("nobody")], getpwnam.call_args_list)
self.assertTrue(ret)
finally:
util.find_devs_with = orig_find_devs_with
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
index 8bcecae7..02516772 100644
--- a/tests/unittests/sources/test_openstack.py
+++ b/tests/unittests/sources/test_openstack.py
@@ -10,9 +10,11 @@ import re
from io import StringIO
from urllib.parse import urlparse
+import pytest
import responses
from cloudinit import helpers, settings, util
+from cloudinit.distros import Distro
from cloudinit.sources import UNSET, BrokenMetadata
from cloudinit.sources import DataSourceOpenStack as ds
from cloudinit.sources import convert_vendordata
@@ -76,6 +78,12 @@ EC2_VERSIONS = [
MOCK_PATH = "cloudinit.sources.DataSourceOpenStack."
+@pytest.fixture(autouse=True)
+def mock_is_resolvable():
+ with mock.patch(f"{MOCK_PATH}util.is_resolvable"):
+ yield
+
+
# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
def _register_uris(version, ec2_files, ec2_meta, os_files, *, responses_mock):
"""Registers a set of url patterns into responses that will mimic the
@@ -292,15 +300,13 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
OS_FILES,
responses_mock=self.responses,
)
+ distro = mock.MagicMock(spec=Distro)
+ distro.is_virtual = False
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + "detect_openstack"
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertTrue(found)
+ self.assertTrue(ds_os.get_data())
self.assertEqual(2, ds_os.version)
md = dict(ds_os.metadata)
md.pop("instance-id", None)
@@ -344,8 +350,9 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
]
self.assertIsNone(ds_os_local.version)
- mock_path = MOCK_PATH + "detect_openstack"
- with test_helpers.mock.patch(mock_path) as m_detect_os:
+ with test_helpers.mock.patch.object(
+ ds_os_local, "detect_openstack"
+ ) as m_detect_os:
m_detect_os.return_value = True
found = ds_os_local.get_data()
self.assertTrue(found)
@@ -370,12 +377,15 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
_register_uris(
self.VERSION, {}, {}, os_files, responses_mock=self.responses
)
+ distro = mock.MagicMock(spec=Distro)
+ distro.is_virtual = True
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + "detect_openstack"
- with test_helpers.mock.patch(mock_path) as m_detect_os:
+ with test_helpers.mock.patch.object(
+ ds_os, "detect_openstack"
+ ) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
self.assertFalse(found)
@@ -394,19 +404,17 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
_register_uris(
self.VERSION, {}, {}, os_files, responses_mock=self.responses
)
+ distro = mock.MagicMock(spec=Distro)
+ distro.is_virtual = True
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
ds_os.ds_cfg = {
"max_wait": 0,
"timeout": 0,
}
self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + "detect_openstack"
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertFalse(found)
+ self.assertFalse(ds_os.get_data())
self.assertIsNone(ds_os.version)
def test_network_config_disabled_by_datasource_config(self):
@@ -471,16 +479,19 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
_register_uris(
self.VERSION, {}, {}, os_files, responses_mock=self.responses
)
+ distro = mock.MagicMock(spec=Distro)
+ distro.is_virtual = True
ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
ds_os.ds_cfg = {
"max_wait": 0,
"timeout": 0,
}
self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + "detect_openstack"
- with test_helpers.mock.patch(mock_path) as m_detect_os:
+ with test_helpers.mock.patch.object(
+ ds_os, "detect_openstack"
+ ) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
self.assertFalse(found)
@@ -568,13 +579,58 @@ class TestVendorDataLoading(test_helpers.TestCase):
@test_helpers.mock.patch(MOCK_PATH + "util.is_x86")
class TestDetectOpenStack(test_helpers.CiTestCase):
+ def setUp(self):
+ self.tmp = self.tmp_dir()
+
+ def _fake_ds(self) -> ds.DataSourceOpenStack:
+ distro = mock.MagicMock(spec=Distro)
+ distro.is_virtual = True
+ return ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
+ )
+
def test_detect_openstack_non_intel_x86(self, m_is_x86):
"""Return True on non-intel platforms because dmi isn't conclusive."""
m_is_x86.return_value = False
self.assertTrue(
- ds.detect_openstack(), "Expected detect_openstack == True"
+ self._fake_ds().detect_openstack(),
+ "Expected detect_openstack == True",
+ )
+
+ def test_detect_openstack_bare_metal(self, m_is_x86):
+ """Return True if the distro is non-virtual."""
+ m_is_x86.return_value = True
+
+ distro = mock.MagicMock(spec=Distro)
+ distro.is_virtual = False
+
+ fake_ds = self._fake_ds()
+ fake_ds.distro = distro
+
+ self.assertFalse(
+ fake_ds.distro.is_virtual,
+ "Expected distro.is_virtual == False",
)
+ with test_helpers.mock.patch.object(
+ fake_ds, "wait_for_metadata_service"
+ ) as m_wait_for_metadata_service:
+ m_wait_for_metadata_service.return_value = True
+
+ self.assertTrue(
+ fake_ds.wait_for_metadata_service(),
+ "Expected wait_for_metadata_service == True",
+ )
+
+ self.assertTrue(
+ fake_ds.detect_openstack(), "Expected detect_openstack == True"
+ )
+
+ self.assertTrue(
+ m_wait_for_metadata_service.called,
+ "Expected wait_for_metadata_service to be called",
+ )
+
@test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
def test_not_detect_openstack_intel_x86_ec2(
@@ -594,7 +650,8 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertFalse(
- ds.detect_openstack(), "Expected detect_openstack == False on EC2"
+ self._fake_ds().detect_openstack(),
+ "Expected detect_openstack == False on EC2",
)
m_proc_env.assert_called_with(1)
@@ -609,7 +666,8 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
for product_name in openstack_product_names:
m_dmi.return_value = product_name
self.assertTrue(
- ds.detect_openstack(), "Failed to detect_openstack"
+ self._fake_ds().detect_openstack(),
+ "Failed to detect_openstack",
)
@test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
@@ -628,7 +686,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- ds.detect_openstack(),
+ self._fake_ds().detect_openstack(),
"Expected detect_openstack == True on OpenTelekomCloud",
)
@@ -648,7 +706,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- ds.detect_openstack(),
+ self._fake_ds().detect_openstack(),
"Expected detect_openstack == True on SAP CCloud VM",
)
@@ -668,7 +726,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_asset_tag_dmi_read
self.assertTrue(
- ds.detect_openstack(),
+ self._fake_ds().detect_openstack(),
"Expected detect_openstack == True on Huawei Cloud VM",
)
@@ -688,11 +746,11 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- ds.detect_openstack(accept_oracle=True),
+ self._fake_ds().detect_openstack(accept_oracle=True),
"Expected detect_openstack == True on OracleCloud.com",
)
self.assertFalse(
- ds.detect_openstack(accept_oracle=False),
+ self._fake_ds().detect_openstack(accept_oracle=False),
"Expected detect_openstack == False.",
)
@@ -711,7 +769,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- ds.detect_openstack(),
+ self._fake_ds().detect_openstack(),
"Expected detect_openstack == True on Generic OpenStack Platform",
)
@@ -749,7 +807,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
m_dmi.side_effect = fake_dmi_read
self.assertTrue(
- ds.detect_openstack(),
+ self._fake_ds().detect_openstack(),
"Expected detect_openstack == True on OpenTelekomCloud",
)
m_proc_env.assert_called_with(1)
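The test_openstack.py changes reflect detect_openstack moving from a module-level function to a DataSourceOpenStack method that can consult self.distro.is_virtual, so the tests now construct the datasource with a Distro mock. A minimal helper mirroring the _fake_ds pattern above (the helper name and run_dir argument are illustrative):

from unittest import mock

from cloudinit import helpers, settings
from cloudinit.distros import Distro
from cloudinit.sources import DataSourceOpenStack as ds


def make_fake_openstack_ds(run_dir: str, is_virtual: bool = True):
    # A Distro mock whose is_virtual attribute drives the new bare-metal
    # short-circuit checked by detect_openstack().
    distro = mock.MagicMock(spec=Distro)
    distro.is_virtual = is_virtual
    return ds.DataSourceOpenStack(
        settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": run_dir})
    )


# detect_openstack() is now an instance method, so it is called on the
# datasource rather than on the module:
# make_fake_openstack_ds("/tmp/run").detect_openstack()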
diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py
index 1fbd564f..109d8889 100644
--- a/tests/unittests/sources/test_ovf.py
+++ b/tests/unittests/sources/test_ovf.py
@@ -5,19 +5,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
-import os
from collections import OrderedDict
from textwrap import dedent
from cloudinit import subp, util
from cloudinit.helpers import Paths
-from cloudinit.safeyaml import YAMLError
from cloudinit.sources import DataSourceOVF as dsovf
-from cloudinit.sources.DataSourceOVF import GuestCustScriptDisabled
-from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
- CustomScriptNotFound,
-)
-from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+from tests.unittests.helpers import CiTestCase, mock
MPATH = "cloudinit.sources.DataSourceOVF."
@@ -203,34 +197,6 @@ class TestReadOvfEnv(CiTestCase):
self.assertIsNone(ud)
-class TestMarkerFiles(CiTestCase):
- def setUp(self):
- super(TestMarkerFiles, self).setUp()
- self.tdir = self.tmp_dir()
-
- def test_false_when_markerid_none(self):
- """Return False when markerid provided is None."""
- self.assertFalse(
- dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)
- )
-
- def test_markerid_file_exist(self):
- """Return False when markerid file path does not exist,
- True otherwise."""
- self.assertFalse(dsovf.check_marker_exists("123", self.tdir))
-
- marker_file = self.tmp_path(".markerfile-123.txt", self.tdir)
- util.write_file(marker_file, "")
- self.assertTrue(dsovf.check_marker_exists("123", self.tdir))
-
- def test_marker_file_setup(self):
- """Test creation of marker files."""
- markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir)
- self.assertFalse(os.path.exists(markerfilepath))
- dsovf.setup_marker_files(markerid="hi", marker_dir=self.tdir)
- self.assertTrue(os.path.exists(markerfilepath))
-
-
class TestDatasourceOVF(CiTestCase):
with_logs = True
@@ -240,334 +206,8 @@ class TestDatasourceOVF(CiTestCase):
self.datasource = dsovf.DataSourceOVF
self.tdir = self.tmp_dir()
- def test_get_data_false_on_none_dmi_data(self):
- """When dmi for system-product-name is None, get_data returns False."""
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
- retcode = wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": None,
- "transport_iso9660": NOT_FOUND,
- "transport_vmware_guestinfo": NOT_FOUND,
- },
- ds.get_data,
- )
- self.assertFalse(retcode, "Expected False return from ds.get_data")
- self.assertIn(
- "DEBUG: No system-product-name found", self.logs.getvalue()
- )
-
- def test_get_data_vmware_customization_disabled(self):
- """When vmware customization is disabled via sys_cfg and
- allow_raw_data is disabled via ds_cfg, log a message.
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={
- "disable_vmware_customization": True,
- "datasource": {"OVF": {"allow_raw_data": False}},
- },
- distro={},
- paths=paths,
- )
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [MISC]
- MARKER-ID = 12345345
- """
- )
- util.write_file(conf_file, conf_content)
- retcode = wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "transport_iso9660": NOT_FOUND,
- "transport_vmware_guestinfo": NOT_FOUND,
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- },
- ds.get_data,
- )
- self.assertFalse(retcode, "Expected False return from ds.get_data")
- self.assertIn(
- "DEBUG: Customization for VMware platform is disabled.",
- self.logs.getvalue(),
- )
-
- def test_get_data_vmware_customization_sys_cfg_disabled(self):
- """When vmware customization is disabled via sys_cfg and
- no meta data is found, log a message.
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={
- "disable_vmware_customization": True,
- "datasource": {"OVF": {"allow_raw_data": True}},
- },
- distro={},
- paths=paths,
- )
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [MISC]
- MARKER-ID = 12345345
- """
- )
- util.write_file(conf_file, conf_content)
- retcode = wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "transport_iso9660": NOT_FOUND,
- "transport_vmware_guestinfo": NOT_FOUND,
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- },
- ds.get_data,
- )
- self.assertFalse(retcode, "Expected False return from ds.get_data")
- self.assertIn(
- "DEBUG: Customization using VMware config is disabled.",
- self.logs.getvalue(),
- )
-
- def test_get_data_allow_raw_data_disabled(self):
- """When allow_raw_data is disabled via ds_cfg and
- meta data is found, log a message.
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={
- "disable_vmware_customization": False,
- "datasource": {"OVF": {"allow_raw_data": False}},
- },
- distro={},
- paths=paths,
- )
-
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CLOUDINIT]
- METADATA = test-meta
- """
- )
- util.write_file(conf_file, conf_content)
- # Prepare the meta data file
- metadata_file = self.tmp_path("test-meta", self.tdir)
- util.write_file(metadata_file, "This is meta data")
- retcode = wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "transport_iso9660": NOT_FOUND,
- "transport_vmware_guestinfo": NOT_FOUND,
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "collect_imc_file_paths": [self.tdir + "/test-meta", "", ""],
- },
- ds.get_data,
- )
- self.assertFalse(retcode, "Expected False return from ds.get_data")
- self.assertIn(
- "DEBUG: Customization using raw data is disabled.",
- self.logs.getvalue(),
- )
-
- def test_get_data_vmware_customization_enabled(self):
- """When cloud-init workflow for vmware is enabled via sys_cfg log a
- message.
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": False},
- distro={},
- paths=paths,
- )
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345345
- """
- )
- util.write_file(conf_file, conf_content)
- with mock.patch(MPATH + "get_tools_config", return_value="true"):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "get_nics_to_enable": "",
- },
- ds.get_data,
- )
- customscript = self.tmp_path("test-script", self.tdir)
- self.assertIn(
- "Script %s not found!!" % customscript, str(context.exception)
- )
-
- def test_get_data_cust_script_disabled(self):
- """If custom script is disabled by VMware tools configuration,
- raise a RuntimeError.
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": False},
- distro={},
- paths=paths,
- )
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- """
- )
- util.write_file(conf_file, conf_content)
- # Prepare the custom sript
- customscript = self.tmp_path("test-script", self.tdir)
- util.write_file(customscript, "This is the post cust script")
-
- with mock.patch(MPATH + "get_tools_config", return_value="invalid"):
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- with self.assertRaises(GuestCustScriptDisabled) as context:
- wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "get_nics_to_enable": "",
- },
- ds.get_data,
- )
- self.assertIn(
- "Custom script is disabled by VM Administrator",
- str(context.exception),
- )
-
- def test_get_data_cust_script_enabled(self):
- """If custom script is enabled by VMware tools configuration,
- execute the script.
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": False},
- distro={},
- paths=paths,
- )
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- """
- )
- util.write_file(conf_file, conf_content)
-
- # Mock custom script is enabled by return true when calling
- # get_tools_config
- with mock.patch(MPATH + "get_tools_config", return_value="true"):
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "get_nics_to_enable": "",
- },
- ds.get_data,
- )
- # Verify custom script is trying to be executed
- customscript = self.tmp_path("test-script", self.tdir)
- self.assertIn(
- "Script %s not found!!" % customscript, str(context.exception)
- )
-
- def test_get_data_force_run_post_script_is_yes(self):
- """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
- enable-custom-scripts is not defined in VM Tools configuration
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": False},
- distro={},
- paths=paths,
- )
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
- # default value is TRUE
- conf_content = dedent(
- """\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- DEFAULT-RUN-POST-CUST-SCRIPT = yes
- """
- )
- util.write_file(conf_file, conf_content)
-
- # Mock get_tools_config(section, key, defaultVal) to return
- # defaultVal
- def my_get_tools_config(*args, **kwargs):
- return args[2]
-
- with mock.patch(
- MPATH + "get_tools_config", side_effect=my_get_tools_config
- ):
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "get_nics_to_enable": "",
- },
- ds.get_data,
- )
- # Verify custom script still runs although it is
- # disabled by VMware Tools
- customscript = self.tmp_path("test-script", self.tdir)
- self.assertIn(
- "Script %s not found!!" % customscript, str(context.exception)
- )
-
- def test_get_data_non_vmware_seed_platform_info(self):
- """Platform info properly reports when on non-vmware platforms."""
+ def test_get_data_seed_dir(self):
+ """Platform info properly reports when getting data from seed dir."""
paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
# Write ovf-env.xml seed file
seed_dir = self.tmp_path("seed", dir=self.tdir)
@@ -577,37 +217,14 @@ class TestDatasourceOVF(CiTestCase):
self.assertEqual("ovf", ds.cloud_name)
self.assertEqual("ovf", ds.platform_type)
- with mock.patch(MPATH + "dmi.read_dmi_data", return_value="!VMware"):
- with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
- with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
- m_iso9660.return_value = NOT_FOUND
- m_guestd.return_value = NOT_FOUND
- self.assertTrue(ds.get_data())
- self.assertEqual(
- "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform
- )
-
- def test_get_data_vmware_seed_platform_info(self):
- """Platform info properly reports when on VMware platform."""
- paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
- # Write ovf-env.xml seed file
- seed_dir = self.tmp_path("seed", dir=self.tdir)
- ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
- util.write_file(ovf_env, OVF_ENV_CONTENT)
- ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
-
- self.assertEqual("ovf", ds.cloud_name)
- self.assertEqual("ovf", ds.platform_type)
- with mock.patch(MPATH + "dmi.read_dmi_data", return_value="VMWare"):
- with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
- with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
- m_iso9660.return_value = NOT_FOUND
- m_guestd.return_value = NOT_FOUND
- self.assertTrue(ds.get_data())
- self.assertEqual(
- "vmware (%s/seed/ovf-env.xml)" % self.tdir,
- ds.subplatform,
- )
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
+ m_iso9660.return_value = NOT_FOUND
+ m_guestd.return_value = NOT_FOUND
+ self.assertTrue(ds.get_data())
+ self.assertEqual(
+ "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform
+ )
@mock.patch("cloudinit.subp.subp")
@mock.patch("cloudinit.sources.DataSource.persist_instance_data")
@@ -679,346 +296,6 @@ class TestDatasourceOVF(CiTestCase):
ds.network_config,
)
- def test_get_data_cloudinit_metadata_json(self):
- """Test metadata can be loaded to cloud-init metadata and network.
- The metadata format is json.
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": True},
- distro={},
- paths=paths,
- )
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CLOUDINIT]
- METADATA = test-meta
- """
- )
- util.write_file(conf_file, conf_content)
- # Prepare the meta data file
- metadata_file = self.tmp_path("test-meta", self.tdir)
- metadata_content = dedent(
- """\
- {
- "instance-id": "cloud-vm",
- "local-hostname": "my-host.domain.com",
- "network": {
- "version": 2,
- "ethernets": {
- "eths": {
- "match": {
- "name": "ens*"
- },
- "dhcp4": true
- }
- }
- }
- }
- """
- )
- util.write_file(metadata_file, metadata_content)
-
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- result = wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "collect_imc_file_paths": [
- self.tdir + "/test-meta",
- "",
- "",
- ],
- "get_nics_to_enable": "",
- },
- ds._get_data,
- )
-
- self.assertTrue(result)
- self.assertEqual("cloud-vm", ds.metadata["instance-id"])
- self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
- self.assertEqual(2, ds.network_config["version"])
- self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"])
-
- def test_get_data_cloudinit_metadata_yaml(self):
- """Test metadata can be loaded to cloud-init metadata and network.
- The metadata format is yaml.
- """
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": True},
- distro={},
- paths=paths,
- )
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CLOUDINIT]
- METADATA = test-meta
- """
- )
- util.write_file(conf_file, conf_content)
- # Prepare the meta data file
- metadata_file = self.tmp_path("test-meta", self.tdir)
- metadata_content = dedent(
- """\
- instance-id: cloud-vm
- local-hostname: my-host.domain.com
- network:
- version: 2
- ethernets:
- nics:
- match:
- name: ens*
- dhcp4: yes
- """
- )
- util.write_file(metadata_file, metadata_content)
-
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- result = wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "collect_imc_file_paths": [
- self.tdir + "/test-meta",
- "",
- "",
- ],
- "get_nics_to_enable": "",
- },
- ds._get_data,
- )
-
- self.assertTrue(result)
- self.assertEqual("cloud-vm", ds.metadata["instance-id"])
- self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
- self.assertEqual(2, ds.network_config["version"])
- self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"])
-
- def test_get_data_cloudinit_metadata_not_valid(self):
- """Test metadata is not JSON or YAML format."""
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": True},
- distro={},
- paths=paths,
- )
-
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CLOUDINIT]
- METADATA = test-meta
- """
- )
- util.write_file(conf_file, conf_content)
-
- # Prepare the meta data file
- metadata_file = self.tmp_path("test-meta", self.tdir)
- metadata_content = "[This is not json or yaml format]a=b"
- util.write_file(metadata_file, metadata_content)
-
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- with self.assertRaises(YAMLError) as context:
- wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "collect_imc_file_paths": [
- self.tdir + "/test-meta",
- "",
- "",
- ],
- "get_nics_to_enable": "",
- },
- ds.get_data,
- )
-
- self.assertIn(
- "expected '<document start>', but found '<scalar>'",
- str(context.exception),
- )
-
- def test_get_data_cloudinit_metadata_not_found(self):
- """Test metadata file can't be found."""
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": True},
- distro={},
- paths=paths,
- )
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CLOUDINIT]
- METADATA = test-meta
- """
- )
- util.write_file(conf_file, conf_content)
- # Don't prepare the meta data file
-
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- with self.assertRaises(FileNotFoundError) as context:
- wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "get_nics_to_enable": "",
- },
- ds.get_data,
- )
-
- self.assertIn("is not found", str(context.exception))
-
- def test_get_data_cloudinit_userdata(self):
- """Test user data can be loaded to cloud-init user data."""
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": False},
- distro={},
- paths=paths,
- )
-
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CLOUDINIT]
- METADATA = test-meta
- USERDATA = test-user
- """
- )
- util.write_file(conf_file, conf_content)
-
- # Prepare the meta data file
- metadata_file = self.tmp_path("test-meta", self.tdir)
- metadata_content = dedent(
- """\
- instance-id: cloud-vm
- local-hostname: my-host.domain.com
- network:
- version: 2
- ethernets:
- nics:
- match:
- name: ens*
- dhcp4: yes
- """
- )
- util.write_file(metadata_file, metadata_content)
-
- # Prepare the user data file
- userdata_file = self.tmp_path("test-user", self.tdir)
- userdata_content = "This is the user data"
- util.write_file(userdata_file, userdata_content)
-
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- result = wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "collect_imc_file_paths": [
- self.tdir + "/test-meta",
- self.tdir + "/test-user",
- "",
- ],
- "get_nics_to_enable": "",
- },
- ds._get_data,
- )
-
- self.assertTrue(result)
- self.assertEqual("cloud-vm", ds.metadata["instance-id"])
- self.assertEqual(userdata_content, ds.userdata_raw)
-
- def test_get_data_cloudinit_userdata_not_found(self):
- """Test userdata file can't be found."""
- paths = Paths({"cloud_dir": self.tdir})
- ds = self.datasource(
- sys_cfg={"disable_vmware_customization": True},
- distro={},
- paths=paths,
- )
-
- # Prepare the conf file
- conf_file = self.tmp_path("test-cust", self.tdir)
- conf_content = dedent(
- """\
- [CLOUDINIT]
- METADATA = test-meta
- USERDATA = test-user
- """
- )
- util.write_file(conf_file, conf_content)
-
- # Prepare the meta data file
- metadata_file = self.tmp_path("test-meta", self.tdir)
- metadata_content = dedent(
- """\
- instance-id: cloud-vm
- local-hostname: my-host.domain.com
- network:
- version: 2
- ethernets:
- nics:
- match:
- name: ens*
- dhcp4: yes
- """
- )
- util.write_file(metadata_file, metadata_content)
-
- # Don't prepare the user data file
-
- with mock.patch(
- MPATH + "set_customization_status", return_value=("msg", b"")
- ):
- with self.assertRaises(FileNotFoundError) as context:
- wrap_and_call(
- "cloudinit.sources.DataSourceOVF",
- {
- "dmi.read_dmi_data": "vmware",
- "util.del_dir": True,
- "search_file": self.tdir,
- "wait_for_imc_cfg_file": conf_file,
- "get_nics_to_enable": "",
- },
- ds.get_data,
- )
-
- self.assertIn("is not found", str(context.exception))
-
class TestTransportIso9660(CiTestCase):
def setUp(self):
diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py
index b3663b0a..4911e5bc 100644
--- a/tests/unittests/sources/test_vmware.py
+++ b/tests/unittests/sources/test_vmware.py
@@ -1,6 +1,7 @@
-# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
+# Copyright (c) 2021-2022 VMware, Inc. All Rights Reserved.
#
# Authors: Andrew Kutz <akutz@vmware.com>
+# Pengpeng Sun <pengpengs@vmware.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -8,18 +9,22 @@ import base64
import gzip
import os
from contextlib import ExitStack
+from textwrap import dedent
import pytest
-from cloudinit import dmi, helpers, safeyaml, settings
+from cloudinit import dmi, helpers, safeyaml, settings, util
from cloudinit.sources import DataSourceVMware
+from cloudinit.sources.helpers.vmware.imc import guestcust_util
from tests.unittests.helpers import (
CiTestCase,
FilesystemMockingTestCase,
mock,
populate_dir,
+ wrap_and_call,
)
+MPATH = "cloudinit.sources.DataSourceVMware."
PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name"
PRODUCT_NAME = "VMware7,1"
PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB"
@@ -490,6 +495,706 @@ class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase):
self.assertFalse(ret)
+class TestDataSourceVMwareIMC(CiTestCase):
+ """
+ Test the VMware Guest OS Customization transport
+ """
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestDataSourceVMwareIMC, self).setUp()
+ self.datasource = DataSourceVMware.DataSourceVMware
+ self.tdir = self.tmp_dir()
+
+ def test_get_data_false_on_none_dmi_data(self):
+ """When dmi for system-product-name is None, get_data returns False."""
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": None,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(result, "Expected False return from ds.get_data")
+ self.assertIn("No system-product-name found", self.logs.getvalue())
+
+ def test_get_imc_data_vmware_customization_disabled(self):
+ """
+ When vmware customization is disabled via sys_cfg and
+ allow_raw_data is disabled via ds_cfg, log a message.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"VMware": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+ self.assertIn(
+ "Customization for VMware platform is disabled",
+ self.logs.getvalue(),
+ )
+
+ def test_get_imc_data_vmware_customization_sys_cfg_disabled(self):
+ """
+ When vmware customization is disabled via sys_cfg and
+ no meta data is found, log a message.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"VMware": {"allow_raw_data": True}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+ self.assertIn(
+ "No allowed customization configuration data found",
+ self.logs.getvalue(),
+ )
+
+ def test_get_imc_data_allow_raw_data_disabled(self):
+ """
+ When allow_raw_data is disabled via ds_cfg and
+ meta data is found, log a message.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": False,
+ "datasource": {"VMware": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+ self.assertIn(
+ "No allowed customization configuration data found",
+ self.logs.getvalue(),
+ )
+
+ def test_get_imc_data_vmware_customization_enabled(self):
+ """
+ When cloud-init workflow for vmware is enabled via sys_cfg log a
+ message.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ with mock.patch(
+ MPATH + "guestcust_util.get_tools_config",
+ return_value="true",
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+ custom_script = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % custom_script,
+ self.logs.getvalue(),
+ )
+
+ def test_get_imc_data_cust_script_disabled(self):
+ """
+ If custom script is disabled by VMware tools configuration,
+ log a message.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """
+ )
+ util.write_file(conf_file, conf_content)
+        # Prepare the custom script
+ customscript = self.tmp_path("test-script", self.tdir)
+ util.write_file(customscript, "This is the post cust script")
+
+ with mock.patch(
+ MPATH + "guestcust_util.get_tools_config",
+ return_value="invalid",
+ ):
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+ self.assertIn(
+ "Custom script is disabled by VM Administrator",
+ self.logs.getvalue(),
+ )
+
+ def test_get_imc_data_cust_script_enabled(self):
+ """
+ If custom script is enabled by VMware tools configuration,
+ execute the script.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+        # Mock the custom script being enabled by returning "true" when
+        # calling get_tools_config
+ with mock.patch(
+ MPATH + "guestcust_util.get_tools_config",
+ return_value="true",
+ ):
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+                # Verify an attempt was made to execute the custom script
+ custom_script = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % custom_script,
+ self.logs.getvalue(),
+ )
+
+ def test_get_imc_data_force_run_post_script_is_yes(self):
+ """
+ If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
+ enable-custom-scripts is not defined in VM Tools configuration
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
+ # default value is TRUE
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ DEFAULT-RUN-POST-CUST-SCRIPT = yes
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Mock get_tools_config(section, key, defaultVal) to return
+ # defaultVal
+ def my_get_tools_config(*args, **kwargs):
+ return args[2]
+
+ with mock.patch(
+ MPATH + "guestcust_util.get_tools_config",
+ side_effect=my_get_tools_config,
+ ):
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+                # Verify the custom script is still attempted even though
+                # enable-custom-scripts is not set in the VMware Tools config
+ custom_script = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % custom_script,
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_cloudinit_metadata_json(self):
+ """
+ Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is json.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ {
+ "instance-id": "cloud-vm",
+ "local-hostname": "my-host.domain.com",
+ "network": {
+ "version": 2,
+ "ethernets": {
+ "eths": {
+ "match": {
+ "name": "ens*"
+ },
+ "dhcp4": true
+ }
+ }
+ }
+ }
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ "guestcust_util.get_imc_dir_path": self.tdir,
+ },
+ ds._get_data,
+ )
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"])
+
+ def test_get_data_cloudinit_metadata_yaml(self):
+ """
+ Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is yaml.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ "guestcust_util.get_imc_dir_path": self.tdir,
+ },
+ ds._get_data,
+ )
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"])
+
+ def test_get_imc_data_cloudinit_metadata_not_valid(self):
+ """
+ Test metadata is not JSON or YAML format, log a message
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = "[This is not json or yaml format]a=b"
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ "guestcust_util.get_imc_dir_path": self.tdir,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(result)
+ self.assertIn(
+ "expected '<document start>', but found '<scalar>'",
+ self.logs.getvalue(),
+ )
+
+ def test_get_imc_data_cloudinit_metadata_not_found(self):
+ """
+ Test metadata file can't be found, log a message
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Don't prepare the meta data file
+
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ "guestcust_util.get_imc_dir_path": self.tdir,
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+ self.assertIn("Meta data file is not found", self.logs.getvalue())
+
+ def test_get_data_cloudinit_userdata(self):
+ """
+ Test user data can be loaded to cloud-init user data.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ # Prepare the user data file
+ userdata_file = self.tmp_path("test-user", self.tdir)
+ userdata_content = "This is the user data"
+ util.write_file(userdata_file, userdata_content)
+
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ "guestcust_util.get_imc_dir_path": self.tdir,
+ },
+ ds._get_data,
+ )
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual(userdata_content, ds.userdata_raw)
+
+ def test_get_imc_data_cloudinit_userdata_not_found(self):
+ """
+ Test userdata file can't be found.
+ """
+ paths = helpers.Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ # Don't prepare the user data file
+
+ with mock.patch(
+ MPATH + "guestcust_util.set_customization_status",
+ return_value=("msg", b""),
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceVMware",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "guestcust_util.search_file": self.tdir,
+ "guestcust_util.wait_for_cust_cfg_file": conf_file,
+ "guestcust_util.get_imc_dir_path": self.tdir,
+ },
+ ds.get_imc_data_fn,
+ )
+ self.assertEqual(result, (None, None, None))
+ self.assertIn("Userdata file is not found", self.logs.getvalue())
+
+
+class TestDataSourceVMwareIMC_MarkerFiles(CiTestCase):
+ def setUp(self):
+ super(TestDataSourceVMwareIMC_MarkerFiles, self).setUp()
+ self.tdir = self.tmp_dir()
+
+ def test_false_when_markerid_none(self):
+ """Return False when markerid provided is None."""
+ self.assertFalse(
+ guestcust_util.check_marker_exists(
+ markerid=None, marker_dir=self.tdir
+ )
+ )
+
+ def test_markerid_file_exist(self):
+ """Return False when markerid file path does not exist,
+ True otherwise."""
+ self.assertFalse(guestcust_util.check_marker_exists("123", self.tdir))
+ marker_file = self.tmp_path(".markerfile-123.txt", self.tdir)
+ util.write_file(marker_file, "")
+ self.assertTrue(guestcust_util.check_marker_exists("123", self.tdir))
+
+ def test_marker_file_setup(self):
+ """Test creation of marker files."""
+ markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir)
+ self.assertFalse(os.path.exists(markerfilepath))
+ guestcust_util.setup_marker_files(marker_id="hi", marker_dir=self.tdir)
+ self.assertTrue(os.path.exists(markerfilepath))
+
+
def assert_metadata(test_obj, ds, metadata):
test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id())
test_obj.assertEqual(
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
index 27481e8e..488df4f3 100644
--- a/tests/unittests/sources/test_vultr.py
+++ b/tests/unittests/sources/test_vultr.py
@@ -30,6 +30,9 @@ VULTR_V1_1 = {
},
},
"hostname": "CLOUDINIT_1",
+ "local-hostname": "CLOUDINIT_1",
+ "instance-v2-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f",
+ "instance-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f",
"instanceid": "42506325",
"interfaces": [
{
@@ -50,7 +53,7 @@ VULTR_V1_1 = {
}
],
"public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
- "region": {"regioncode": "EWR"},
+ "region": "us",
"user-defined": [],
"startup-script": "echo No configured startup script",
"raid1-script": "",
@@ -85,7 +88,9 @@ VULTR_V1_2 = {
},
},
"hostname": "CLOUDINIT_2",
+ "local-hostname": "CLOUDINIT_2",
"instance-v2-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f",
+ "instance-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f",
"instanceid": "42872224",
"interfaces": [
{
@@ -121,7 +126,7 @@ VULTR_V1_2 = {
},
],
"public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
- "region": {"regioncode": "EWR"},
+ "region": "us",
"user-defined": [],
"startup-script": "echo No configured startup script",
"user-data": [],
@@ -139,8 +144,46 @@ VULTR_V1_2 = {
],
}
+VULTR_V1_3 = None
+
SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"]
+CLOUD_INTERFACES = {
+ "version": 1,
+ "config": [
+ {
+ "type": "nameserver",
+ "address": ["108.61.10.10", "2001:19f0:300:1704::6"],
+ },
+ {
+ "type": "physical",
+ "mac_address": "56:00:03:1b:4e:ca",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ {
+ "type": "static6",
+ "control": "auto",
+ "address": "2002:19f0:5:28a7::/64",
+ },
+ ],
+ },
+ {
+ "type": "physical",
+ "mac_address": "5a:00:03:1b:4e:ca",
+ "subnets": [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": "10.1.112.3",
+ "netmask": "255.255.240.0",
+ }
+ ],
+ },
+ ],
+}
+
INTERFACES = ["lo", "dummy0", "eth1", "eth0", "eth2"]
ORDERED_INTERFACES = ["eth0", "eth1", "eth2"]
@@ -241,8 +284,14 @@ def check_route(url):
class TestDataSourceVultr(CiTestCase):
def setUp(self):
+ global VULTR_V1_3
super(TestDataSourceVultr, self).setUp()
+ # Create v3
+ VULTR_V1_3 = VULTR_V1_2.copy()
+ VULTR_V1_3["cloud_interfaces"] = CLOUD_INTERFACES.copy()
+ VULTR_V1_3["interfaces"] = []
+
# Stored as a dict to make it easier to maintain
raw1 = json.dumps(VULTR_V1_1["vendor-data"][0])
raw2 = json.dumps(VULTR_V1_2["vendor-data"][0])
@@ -250,6 +299,7 @@ class TestDataSourceVultr(CiTestCase):
# Make expected format
VULTR_V1_1["vendor-data"] = [raw1]
VULTR_V1_2["vendor-data"] = [raw2]
+ VULTR_V1_3["vendor-data"] = [raw2]
self.tmp = self.tmp_dir()
@@ -297,6 +347,28 @@ class TestDataSourceVultr(CiTestCase):
# Test network config generation
self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+ # Test the datasource with new network config type
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.get_metadata")
+ def test_datasource_cloud_interfaces(
+ self, mock_getmeta, mock_isvultr, mock_netmap
+ ):
+ mock_getmeta.return_value = VULTR_V1_3
+ mock_isvultr.return_value = True
+ mock_netmap.return_value = INTERFACE_MAP
+
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
+ )
+
+ source._get_data()
+
+ # Test network config generation
+ self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+
# Test network config generation
@mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_network_config(self, mock_netmap):
diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py
index 38d45d0e..2fc2e21c 100644
--- a/tests/unittests/sources/vmware/test_vmware_config_file.py
+++ b/tests/unittests/sources/vmware/test_vmware_config_file.py
@@ -1,5 +1,5 @@
# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2016 VMware INC.
+# Copyright (C) 2016-2022 VMware INC.
#
# Author: Sankar Tanguturi <stanguturi@vmware.com>
# Pengpeng Sun <pengpengs@vmware.com>
@@ -12,10 +12,6 @@ import sys
import tempfile
import textwrap
-from cloudinit.sources.DataSourceOVF import (
- get_network_config_from_conf,
- read_vmware_imc,
-)
from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
from cloudinit.sources.helpers.vmware.imc.config import Config
from cloudinit.sources.helpers.vmware.imc.config_file import (
@@ -25,6 +21,10 @@ from cloudinit.sources.helpers.vmware.imc.config_nic import (
NicConfigurator,
gen_subnet,
)
+from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
+ get_network_data_from_vmware_cust_cfg,
+ get_non_network_data_from_vmware_cust_cfg,
+)
from tests.unittests.helpers import CiTestCase, cloud_init_project_dir
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
@@ -63,23 +63,29 @@ class TestVmwareConfigFile(CiTestCase):
self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
- def test_datasource_instance_id(self):
- """Tests instance id for the DatasourceOVF"""
+ def test_configfile_without_instance_id(self):
+ """
+ Tests instance id is None when configuration file has no instance id
+ """
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
- instance_id_prefix = "iid-vmware-"
+ (md1, _) = get_non_network_data_from_vmware_cust_cfg(conf)
+ self.assertFalse("instance-id" in md1)
- conf = Config(cf)
+ (md2, _) = get_non_network_data_from_vmware_cust_cfg(conf)
+ self.assertFalse("instance-id" in md2)
- (md1, _, _) = read_vmware_imc(conf)
- self.assertIn(instance_id_prefix, md1["instance-id"])
- self.assertEqual(md1["instance-id"], "iid-vmware-imc")
+ def test_configfile_with_instance_id(self):
+ """Tests instance id get from configuration file"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic-instance-id.cfg")
+ conf = Config(cf)
- (md2, _, _) = read_vmware_imc(conf)
- self.assertIn(instance_id_prefix, md2["instance-id"])
- self.assertEqual(md2["instance-id"], "iid-vmware-imc")
+ (md1, _) = get_non_network_data_from_vmware_cust_cfg(conf)
+ self.assertEqual(md1["instance-id"], conf.instance_id, "instance-id")
- self.assertEqual(md2["instance-id"], md1["instance-id"])
+ (md2, _) = get_non_network_data_from_vmware_cust_cfg(conf)
+ self.assertEqual(md2["instance-id"], conf.instance_id, "instance-id")
def test_configfile_static_2nics(self):
"""Tests Config class for a configuration with two static NICs."""
@@ -166,7 +172,7 @@ class TestVmwareConfigFile(CiTestCase):
config = Config(cf)
- network_config = get_network_config_from_conf(config, False)
+ network_config = get_network_data_from_vmware_cust_cfg(config, False)
self.assertEqual(1, network_config.get("version"))
@@ -201,14 +207,14 @@ class TestVmwareConfigFile(CiTestCase):
)
def test_get_config_dns_suffixes(self):
- """Tests if get_network_config_from_conf properly
+        """Tests if get_network_data_from_vmware_cust_cfg properly
generates nameservers and dns settings from a
specified configuration"""
cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
config = Config(cf)
- network_config = get_network_config_from_conf(config, False)
+ network_config = get_network_data_from_vmware_cust_cfg(config, False)
self.assertEqual(1, network_config.get("version"))
diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py
index 684a9ae5..1c5deddf 100644
--- a/tests/unittests/test_atomic_helper.py
+++ b/tests/unittests/test_atomic_helper.py
@@ -30,6 +30,16 @@ class TestAtomicHelper(CiTestCase):
atomic_helper.write_file(path, contents, mode=0o400)
self.check_file(path, contents, perms=0o400)
+ def test_file_preserve_permissions(self):
+ """create a file with mode 700, then write_file with mode 644."""
+ path = self.tmp_path("test_file_preserve_permissions")
+ contents = b"test_file_perms"
+ with open(path, mode="wb") as f:
+ f.write(b"test file preserve permissions")
+ os.chmod(f.name, 0o700)
+ atomic_helper.write_file(path, contents, preserve_mode=True)
+ self.check_file(path, contents, perms=0o700)
+
def test_write_json(self):
"""write_json output is readable json."""
path = self.tmp_path("test_write_json")
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index dd85a1c7..2d57ba04 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -249,8 +249,10 @@ class TestCLI:
"**Supported distros:** almalinux, alpine, centos, "
"cloudlinux, cos, debian, eurolinux, fedora, freebsd, "
"mariner, miraclelinux, "
- "openbsd, openEuler, openmandriva, "
- "opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo",
+ "openbsd, openEuler, OpenCloudOS, openmandriva, "
+ "opensuse, opensuse-microos, opensuse-tumbleweed, "
+ "opensuse-leap, photon, rhel, rocky, sle_hpc, "
+ "sle-micro, sles, TencentOS, ubuntu, virtuozzo",
"**Config schema**:\n **resize_rootfs:** "
"(``true``/``false``/``noblock``)",
"**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n",
diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py
index 91d424c1..698e3df8 100644
--- a/tests/unittests/test_dmi.py
+++ b/tests/unittests/test_dmi.py
@@ -3,6 +3,8 @@ import shutil
import tempfile
from unittest import mock
+import pytest
+
from cloudinit import dmi, subp, util
from tests.unittests import helpers
@@ -168,3 +170,52 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
key, val = ("system-product-name", "my_product")
self._configure_kenv_return(key, val)
self.assertEqual(dmi.read_dmi_data(key), val)
+
+
+class TestSubDMIVars:
+
+ DMI_SRC = (
+ "dmi.nope__dmi.system-uuid__/__dmi.uuid____dmi.smbios.system.uuid__"
+ )
+
+ @pytest.mark.parametrize(
+ "is_freebsd, src, read_dmi_data_mocks, warnings, expected",
+ (
+ pytest.param(
+ False,
+ DMI_SRC,
+ [mock.call("system-uuid")],
+ [
+ "Ignoring invalid __dmi.smbios.system.uuid__",
+ "Ignoring invalid __dmi.uuid__",
+ ],
+ "dmi.nope1/__dmi.uuid____dmi.smbios.system.uuid__",
+ id="match_dmi_distro_agnostic_strings_warn_on_unknown",
+ ),
+ pytest.param(
+ True,
+ DMI_SRC,
+ [mock.call("system-uuid")],
+ [
+ "Ignoring invalid __dmi.smbios.system.uuid__",
+ "Ignoring invalid __dmi.uuid__",
+ ],
+ "dmi.nope1/__dmi.uuid____dmi.smbios.system.uuid__",
+ id="match_dmi_agnostic_and_freebsd_dmi_keys_warn_on_unknown",
+ ),
+ ),
+ )
+ def test_sub_dmi_vars(
+ self, is_freebsd, src, read_dmi_data_mocks, warnings, expected, caplog
+ ):
+ with mock.patch.object(dmi, "read_dmi_data") as m_dmi:
+ m_dmi.side_effect = [
+ "1",
+ "2",
+ RuntimeError("Too many read_dmi_data calls"),
+ ]
+ with mock.patch.object(dmi, "is_FreeBSD", return_value=is_freebsd):
+ assert expected == dmi.sub_dmi_vars(src)
+ for warning in warnings:
+ assert 1 == caplog.text.count(warning)
+ assert m_dmi.call_args_list == read_dmi_data_mocks
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index f4b9403d..03be0c92 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -622,51 +622,6 @@ class TestDsIdentify(DsIdentifyBase):
"""OVF guest info is found on vmware."""
self._test_ds_found("OVF-guestinfo")
- def test_ovf_on_vmware_iso_found_when_vmware_customization(self):
- """OVF is identified when vmware customization is enabled."""
- self._test_ds_found("OVF-vmware-customization")
-
- def test_ovf_on_vmware_iso_found_open_vm_tools_64(self):
- """OVF is identified when open-vm-tools installed in /usr/lib64."""
- cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
- p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
- open64 = "usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so"
- cust64["files"][open64] = cust64["files"][p32]
- del cust64["files"][p32]
- return self._check_via_dict(
- cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
- )
-
- def test_ovf_on_vmware_iso_found_open_vm_tools_x86_64_linux_gnu(self):
- """OVF is identified when open-vm-tools installed in
- /usr/lib/x86_64-linux-gnu."""
- cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
- p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
- x86 = (
- "usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/"
- "libdeployPkgPlugin.so"
- )
- cust64["files"][x86] = cust64["files"][p32]
- del cust64["files"][p32]
- return self._check_via_dict(
- cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
- )
-
- def test_ovf_on_vmware_iso_found_open_vm_tools_aarch64_linux_gnu(self):
- """OVF is identified when open-vm-tools installed in
- /usr/lib/aarch64-linux-gnu."""
- cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
- p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
- aarch64 = (
- "usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/"
- "libdeployPkgPlugin.so"
- )
- cust64["files"][aarch64] = cust64["files"][p32]
- del cust64["files"][p32]
- return self._check_via_dict(
- cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
- )
-
def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self):
"""OVF is identified by well-known iso9660 labels."""
ovf_cdrom_by_label = copy.deepcopy(VALID_CFG["OVF"])
@@ -832,6 +787,51 @@ class TestDsIdentify(DsIdentifyBase):
"""VMware: no valid transports"""
self._test_ds_not_found("VMware-NoValidTransports")
+ def test_vmware_on_vmware_when_vmware_customization_is_enabled(self):
+ """VMware is identified when vmware customization is enabled."""
+ self._test_ds_found("VMware-vmware-customization")
+
+ def test_vmware_on_vmware_open_vm_tools_64(self):
+ """VMware is identified when open-vm-tools installed in /usr/lib64."""
+ cust64 = copy.deepcopy(VALID_CFG["VMware-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ open64 = "usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ cust64["files"][open64] = cust64["files"][p32]
+ del cust64["files"][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
+
+ def test_vmware_on_vmware_open_vm_tools_x86_64_linux_gnu(self):
+ """VMware is identified when open-vm-tools installed in
+ /usr/lib/x86_64-linux-gnu."""
+ cust64 = copy.deepcopy(VALID_CFG["VMware-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ x86 = (
+ "usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/"
+ "libdeployPkgPlugin.so"
+ )
+ cust64["files"][x86] = cust64["files"][p32]
+ del cust64["files"][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
+
+ def test_vmware_on_vmware_open_vm_tools_aarch64_linux_gnu(self):
+ """VMware is identified when open-vm-tools installed in
+ /usr/lib/aarch64-linux-gnu."""
+ cust64 = copy.deepcopy(VALID_CFG["VMware-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ aarch64 = (
+ "usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/"
+ "libdeployPkgPlugin.so"
+ )
+ cust64["files"][aarch64] = cust64["files"][p32]
+ del cust64["files"][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
+
def test_vmware_envvar_no_data(self):
"""VMware: envvar transport no data"""
self._test_ds_not_found("VMware-EnvVar-NoData")
@@ -950,7 +950,7 @@ class TestOracle(DsIdentifyBase):
"""Simple negative test of Oracle."""
mycfg = copy.deepcopy(VALID_CFG["Oracle"])
mycfg["files"][P_CHASSIS_ASSET_TAG] = "Not Oracle"
- self._check_via_dict(mycfg, rc=RC_NOT_FOUND)
+ self._check_via_dict(mycfg, ds=["openstack", "none"], rc=RC_FOUND)
def blkid_out(disks=None):
@@ -1056,6 +1056,7 @@ VALID_CFG = {
"Ec2-brightbox-negative": {
"ds": "Ec2",
"files": {P_PRODUCT_SERIAL: "tricky-host.bobrightbox.com\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
},
"GCE": {
"ds": "GCE",
@@ -1256,26 +1257,6 @@ VALID_CFG = {
os.path.join(P_SEED_DIR, "ovf", "ovf-env.xml"): "present\n",
},
},
- "OVF-vmware-customization": {
- "ds": "OVF",
- "mocks": [
- # Include a mockes iso9660 potential, even though content not ovf
- {
- "name": "blkid",
- "ret": 0,
- "out": blkid_out(
- [{"DEVNAME": "sr0", "TYPE": "iso9660", "LABEL": ""}]
- ),
- },
- MOCK_VIRT_IS_VMWARE,
- ],
- "files": {
- "dev/sr0": "no match",
- # Setup vmware customization enabled
- "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so": "here",
- "etc/cloud/cloud.cfg": "disable_vmware_customization: false\n",
- },
- },
"OVF": {
"ds": "OVF",
"mocks": [
@@ -1617,6 +1598,7 @@ VALID_CFG = {
"Ec2-E24Cloud-negative": {
"ds": "Ec2",
"files": {P_SYS_VENDOR: "e24cloudyday\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
},
"VMware-NoValidTransports": {
"ds": "VMware",
@@ -1624,6 +1606,22 @@ VALID_CFG = {
MOCK_VIRT_IS_VMWARE,
],
},
+ "VMware-vmware-customization": {
+ "ds": "VMware",
+ "mocks": [
+ MOCK_VIRT_IS_VMWARE,
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ ],
+ "files": {
+ # Setup vmware customization enabled
+ "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so": "here",
+ "etc/cloud/cloud.cfg": "disable_vmware_customization: false\n",
+ },
+ },
"VMware-EnvVar-NoData": {
"ds": "VMware",
"mocks": [
@@ -1759,6 +1757,7 @@ VALID_CFG = {
"VMware-GuestInfo-NoVirtID": {
"ds": "VMware",
"mocks": [
+ MOCK_VIRT_IS_KVM,
{
"name": "vmware_has_rpctool",
"ret": 0,
@@ -1864,6 +1863,7 @@ VALID_CFG = {
P_PRODUCT_NAME: "3DS Outscale VM\n",
P_SYS_VENDOR: "Not 3DS Outscale\n",
},
+ "mocks": [MOCK_VIRT_IS_KVM],
},
"Ec2-Outscale-negative-productname": {
"ds": "Ec2",
@@ -1871,6 +1871,7 @@ VALID_CFG = {
P_PRODUCT_NAME: "Not 3DS Outscale VM\n",
P_SYS_VENDOR: "3DS Outscale\n",
},
+ "mocks": [MOCK_VIRT_IS_KVM],
},
}
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 525706d1..056aaeb6 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -8,6 +8,7 @@ import json
import os
import re
import textwrap
+from typing import Optional
import pytest
from yaml.serializer import Serializer
@@ -33,6 +34,7 @@ from tests.unittests.helpers import (
CiTestCase,
FilesystemMockingTestCase,
dir2dict,
+ does_not_raise,
mock,
populate_dir,
)
@@ -379,7 +381,6 @@ network:
bondM:
addresses:
- 10.101.10.47/23
- gateway4: 10.101.11.254
interfaces:
- eno1
- eno3
@@ -401,6 +402,9 @@ network:
mode: 802.3ad
transmit-hash-policy: layer3+4
up-delay: 0
+ routes:
+ - to: default
+ via: 10.101.11.254
vlans:
bond0.3502:
addresses:
@@ -2247,7 +2251,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
addresses:
- 192.168.0.2/24
- 192.168.2.10/24
- gateway4: 192.168.0.1
id: 101
link: eth0
macaddress: aa:bb:cc:dd:ee:11
@@ -2260,6 +2263,9 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
- barley.maas
- sacchromyces.maas
- brettanomyces.maas
+ routes:
+ - to: default
+ via: 192.168.0.1
"""
).rstrip(" "),
"expected_sysconfig_opensuse": {
@@ -2971,7 +2977,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
- 192.168.0.2/24
- 192.168.1.2/24
- 2001:1::1/92
- gateway4: 192.168.0.1
interfaces:
- bond0s0
- bond0s1
@@ -2988,6 +2993,8 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
transmit-hash-policy: layer3+4
up-delay: 20
routes:
+ - to: default
+ via: 192.168.0.1
- to: 10.1.3.0/24
via: 192.168.0.3
- to: 2001:67c::/32
@@ -5219,7 +5226,7 @@ USERCTL=no
# Created by cloud-init on instance boot automatically, do not edit.
#
2a00:1730:fff9:100::1/128 via ::0 dev eth0
- ::0/64 via 2a00:1730:fff9:100::1 dev eth0
+ ::0/0 via 2a00:1730:fff9:100::1 dev eth0
""" # noqa: E501
),
}
@@ -5993,26 +6000,368 @@ iface eth0 inet dhcp
)
-class TestNetplanNetRendering(CiTestCase):
+class TestNetplanNetRendering:
+ @pytest.mark.parametrize(
+ "network_cfg,expected",
+ [
+ pytest.param(
+ None,
+ """
+ network:
+ ethernets:
+ eth1000:
+ dhcp4: true
+ match:
+ macaddress: 07-1c-c6-75-a4-be
+ set-name: eth1000
+ version: 2
+ """,
+ id="default_generation",
+ ),
+ # Asserts a netconf v1 with a physical device and two gateways
+ # does not produce deprecated keys, `gateway{46}`, in Netplan v2
+ pytest.param(
+ """
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ - type: static
+ address: 11.0.0.11/24
+ gateway: 11.0.0.1
+ """,
+ """
+ network:
+ version: 2
+ ethernets:
+ interface0:
+ addresses:
+ - 192.168.23.14/27
+ - 11.0.0.11/24
+ match:
+ macaddress: 00:11:22:33:44:55
+ set-name: interface0
+ routes:
+ - to: default
+ via: 192.168.23.1
+ - to: default
+ via: 11.0.0.1
+ """,
+ id="physical_gateway46",
+ ),
+ # Asserts a netconf v1 with a bond device and two gateways
+ # does not produce deprecated keys, `gateway{46}`, in Netplan v2
+ pytest.param(
+ """
+ version: 1
+ config:
+ - type: bond
+ name: bond0
+ bond_interfaces:
+ - eth0
+ - eth1
+ params: {}
+ subnets:
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ - type: static
+ address: 11.0.0.11/24
+ gateway: 11.0.0.1
+ """,
+ """
+ network:
+ version: 2
+ bonds:
+ bond0:
+ addresses:
+ - 192.168.23.14/27
+ - 11.0.0.11/24
+ interfaces:
+ - eth0
+ - eth1
+ routes:
+ - to: default
+ via: 192.168.23.1
+ - to: default
+ via: 11.0.0.1
+ eth0: {}
+ eth1: {}
+ """,
+ id="bond_gateway46",
+ ),
+ # Asserts a netconf v1 with a bridge device and two gateways
+ # does not produce deprecated keys, `gateway{46}`, in Netplan v2
+ pytest.param(
+ """
+ version: 1
+ config:
+ - type: bridge
+ name: bridge0
+ bridge_interfaces:
+ - eth0
+ params: {}
+ subnets:
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ - type: static
+ address: 11.0.0.11/24
+ gateway: 11.0.0.1
+ """,
+ """
+ network:
+ version: 2
+ bridges:
+ bridge0:
+ addresses:
+ - 192.168.23.14/27
+ - 11.0.0.11/24
+ interfaces:
+ - eth0
+ routes:
+ - to: default
+ via: 192.168.23.1
+ - to: default
+ via: 11.0.0.1
+ """,
+ id="bridge_gateway46",
+ ),
+ # Asserts a netconf v1 with a vlan device and two gateways
+ # does not produce deprecated keys, `gateway{46}`, in Netplan v2
+ pytest.param(
+ """
+ version: 1
+ config:
+ - type: vlan
+ name: vlan0
+ vlan_link: eth0
+ vlan_id: 101
+ subnets:
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ - type: static
+ address: 11.0.0.11/24
+ gateway: 11.0.0.1
+ """,
+ """
+ network:
+ version: 2
+ vlans:
+ vlan0:
+ addresses:
+ - 192.168.23.14/27
+ - 11.0.0.11/24
+ id: 101
+ link: eth0
+ routes:
+ - to: default
+ via: 192.168.23.1
+ - to: default
+ via: 11.0.0.1
+ """,
+ id="vlan_gateway46",
+ ),
+ # Asserts a netconf v1 with a nameserver device and two gateways
+ # does not produce deprecated keys, `gateway{46}`, in Netplan v2
+ pytest.param(
+ """
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ - type: nameserver
+ address:
+ - 192.168.23.14/27
+ - 11.0.0.11/24
+ search:
+ - exemplary
+ subnets:
+ - type: static
+ address: 192.168.23.14/27
+ gateway: 192.168.23.1
+ - type: static
+ address: 11.0.0.11/24
+ gateway: 11.0.0.1
+ """,
+ """
+ network:
+ version: 2
+ ethernets:
+ interface0:
+ addresses:
+ - 192.168.23.14/27
+ match:
+ macaddress: 00:11:22:33:44:55
+ nameservers:
+ addresses:
+ - 192.168.23.14/27
+ - 11.0.0.11/24
+ search:
+ - exemplary
+ set-name: interface0
+ routes:
+ - to: default
+ via: 192.168.23.1
+ """,
+ id="nameserver_gateway4",
+ ),
+ # Asserts a netconf v1 with two subnets with two gateways does
+ # not clash
+ pytest.param(
+ """
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: static
+ address: 192.168.23.14/24
+ gateway: 192.168.23.1
+ - type: static
+ address: 10.184.225.122
+ routes:
+ - network: 10.176.0.0
+ gateway: 10.184.225.121
+ """,
+ """
+ network:
+ version: 2
+ ethernets:
+ interface0:
+ addresses:
+ - 192.168.23.14/24
+ - 10.184.225.122/24
+ match:
+ macaddress: 00:11:22:33:44:55
+ routes:
+ - to: default
+ via: 192.168.23.1
+ - to: 10.176.0.0/24
+ via: 10.184.225.121
+ set-name: interface0
+ """,
+ id="two_subnets_old_new_gateway46",
+ ),
+ # Asserts a netconf v1 with one subnet with two gateways does
+ # not clash
+ pytest.param(
+ """
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: static
+ address: 192.168.23.14/24
+ gateway: 192.168.23.1
+ routes:
+ - network: 192.167.225.122
+ gateway: 192.168.23.1
+ """,
+ """
+ network:
+ version: 2
+ ethernets:
+ interface0:
+ addresses:
+ - 192.168.23.14/24
+ match:
+ macaddress: 00:11:22:33:44:55
+ routes:
+ - to: default
+ via: 192.168.23.1
+ - to: 192.167.225.122/24
+ via: 192.168.23.1
+ set-name: interface0
+ """,
+ id="one_subnet_old_new_gateway46",
+ ),
+ # Assert gateways outside of the subnet's network are added with
+ # the on-link flag
+ pytest.param(
+ """
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: '00:11:22:33:44:55'
+ subnets:
+ - type: static
+ address: 192.168.23.14/24
+ gateway: 192.168.255.1
+ - type: static
+ address: 2001:cafe::/64
+ gateway: 2001:ffff::1
+ """,
+ """
+ network:
+ version: 2
+ ethernets:
+ interface0:
+ addresses:
+ - 192.168.23.14/24
+ - 2001:cafe::/64
+ match:
+ macaddress: 00:11:22:33:44:55
+ routes:
+ - to: default
+ via: 192.168.255.1
+ on-link: true
+ - to: default
+ via: 2001:ffff::1
+ on-link: true
+ set-name: interface0
+ """,
+ id="onlink_gateways",
+ ),
+ ],
+ )
+ @mock.patch(
+ "cloudinit.net.netplan.Renderer.features",
+ new_callable=mock.PropertyMock(return_value=[]),
+ )
@mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.netplan._clean_default")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(
+ def test_render(
self,
mock_get_devicelist,
mock_read_sys_net,
mock_sys_dev_path,
mock_clean_default,
m_get_cmdline,
+ m_renderer_features,
+ network_cfg: Optional[str],
+ expected: str,
+ tmpdir,
):
- tmp_dir = self.tmp_dir()
+ tmp_dir = str(tmpdir)
_setup_test(
tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
)
- network_cfg = net.generate_fallback_config()
+ if network_cfg is None:
+ network_cfg = net.generate_fallback_config()
+ else:
+ network_cfg = yaml.load(network_cfg)
+ assert isinstance(network_cfg, dict)
+
ns = network_state.parse_net_config_data(
network_cfg, skip_broken=False
)
@@ -6026,25 +6375,13 @@ class TestNetplanNetRendering(CiTestCase):
)
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(
- os.path.exists(os.path.join(render_dir, render_target))
- )
+ assert os.path.exists(os.path.join(render_dir, render_target))
with open(os.path.join(render_dir, render_target)) as fh:
contents = fh.read()
print(contents)
- expected = """
-network:
- ethernets:
- eth1000:
- dhcp4: true
- match:
- macaddress: 07-1c-c6-75-a4-be
- set-name: eth1000
- version: 2
-"""
- self.assertEqual(expected.lstrip(), contents.lstrip())
- self.assertEqual(1, mock_clean_default.call_count)
+ assert yaml.load(expected) == yaml.load(contents)
+        assert 1 == mock_clean_default.call_count
class TestNetplanCleanDefault(CiTestCase):
@@ -7672,6 +8009,7 @@ class TestInterfaceHasOwnMac(CiTestCase):
mock.Mock(return_value=False),
)
class TestGetInterfacesByMac(CiTestCase):
+ with_logs = True
_data = {
"bonds": ["bond1"],
"bridges": ["bridge1"],
@@ -7683,6 +8021,10 @@ class TestGetInterfacesByMac(CiTestCase):
"bridge1",
"bond1.101",
"lo",
+ "netvsc0-vf",
+ "netvsc0",
+ "netvsc1",
+ "netvsc1-vf",
],
"macs": {
"enp0s1": "aa:aa:aa:aa:aa:01",
@@ -7693,14 +8035,27 @@ class TestGetInterfacesByMac(CiTestCase):
"bridge1-nic": "aa:aa:aa:aa:aa:03",
"lo": "00:00:00:00:00:00",
"greptap0": "00:00:00:00:00:00",
+ "netvsc0-vf": "aa:aa:aa:aa:aa:04",
+ "netvsc0": "aa:aa:aa:aa:aa:04",
+ "netvsc1-vf": "aa:aa:aa:aa:aa:05",
+ "netvsc1": "aa:aa:aa:aa:aa:05",
"tun0": None,
},
+ "drivers": {
+ "netvsc0": "hv_netvsc",
+ "netvsc0-vf": "foo",
+ "netvsc1": "hv_netvsc",
+ "netvsc1-vf": "bar",
+ },
}
data: dict = {}
def _se_get_devicelist(self):
return list(self.data["devices"])
+ def _se_device_driver(self, name):
+ return self.data["drivers"].get(name, None)
+
def _se_get_interface_mac(self, name):
return self.data["macs"][name]
@@ -7722,6 +8077,7 @@ class TestGetInterfacesByMac(CiTestCase):
self.data["devices"] = set(list(self.data["macs"].keys()))
mocks = (
"get_devicelist",
+ "device_driver",
"get_interface_mac",
"is_bridge",
"interface_has_own_mac",
@@ -7741,6 +8097,11 @@ class TestGetInterfacesByMac(CiTestCase):
self.data["macs"]["bridge1-nic"] = self.data["macs"]["enp0s1"]
self.assertRaises(RuntimeError, net.get_interfaces_by_mac)
+ def test_raise_exception_on_duplicate_netvsc_macs(self):
+ self._mock_setup()
+ self.data["macs"]["netvsc0"] = self.data["macs"]["netvsc1"]
+ self.assertRaises(RuntimeError, net.get_interfaces_by_mac)
+
def test_excludes_any_without_mac_address(self):
self._mock_setup()
ret = net.get_interfaces_by_mac()
@@ -7759,6 +8120,8 @@ class TestGetInterfacesByMac(CiTestCase):
"aa:aa:aa:aa:aa:02": "enp0s2",
"aa:aa:aa:aa:aa:03": "bridge1-nic",
"00:00:00:00:00:00": "lo",
+ "aa:aa:aa:aa:aa:04": "netvsc0",
+ "aa:aa:aa:aa:aa:05": "netvsc1",
},
ret,
)
@@ -7859,6 +8222,24 @@ class TestGetInterfacesByMac(CiTestCase):
}
self.assertEqual(expected, result)
+ def test_duplicate_ignored_macs(self):
+ # LP: #199792
+ self._data = copy.deepcopy(self._data)
+ self._data["macs"]["swp0"] = "9a:57:7d:78:47:c0"
+ self._data["macs"]["swp1"] = "9a:57:7d:78:47:c0"
+ self._data["own_macs"].append("swp0")
+ self._data["own_macs"].append("swp1")
+ self._data["drivers"]["swp0"] = "mscc_felix"
+ self._data["drivers"]["swp1"] = "mscc_felix"
+ self._mock_setup()
+ with does_not_raise():
+ net.get_interfaces_by_mac()
+ pattern = (
+ "Ignoring duplicate macs from 'swp[0-1]' and 'swp[0-1]' due to "
+ "driver 'mscc_felix'."
+ )
+ assert re.search(pattern, self.logs.getvalue())
+
class TestInterfacesSorting(CiTestCase):
def test_natural_order(self):
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 1288c259..4121e404 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -16,6 +16,14 @@ config:
- address: 172.20.80.129/25
type: static
type: physical
+- id: eno2
+ mac_address: 08:94:ef:51:ae:e1
+ mtu: 1470
+ name: eno2
+ subnets:
+ - address: fd12:3456:789a:1::1/64
+ type: static6
+ type: physical
version: 1
"""
@@ -76,6 +84,8 @@ class TestFreeBSDRoundTrip(CiTestCase):
"/etc/rc.conf": (
"# dummy rc.conf\n"
"ifconfig_eno1="
- "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n"
+ "'inet 172.20.80.129 netmask 255.255.255.128 mtu 1470'\n"
+ "ifconfig_eno2_ipv6="
+ "'inet6 fd12:3456:789a:1::1/64 mtu 1470'\n"
),
}
diff --git a/tests/unittests/test_safeyaml.py b/tests/unittests/test_safeyaml.py
index 5be09b21..8713546f 100644
--- a/tests/unittests/test_safeyaml.py
+++ b/tests/unittests/test_safeyaml.py
@@ -12,45 +12,60 @@ class TestLoadWithMarks:
"source_yaml,loaded_yaml,schemamarks",
(
# Invalid cloud-config, non-dict types don't cause an error
- (b"scalar", "scalar", {}),
- # Multiple keys account for comments and whitespace lines
- (
+ pytest.param(b"scalar", "scalar", {}, id="invalid_nondict_config"),
+ pytest.param(
b"#\na: va\n \nb: vb\n#\nc: vc",
{"a": "va", "b": "vb", "c": "vc"},
{"a": 2, "b": 4, "c": 6},
+ id="handle_whitespace_and_comments",
),
- # List items represented on correct line number
- (
+ pytest.param(
b"a:\n - a1\n\n - a2\n",
{"a": ["a1", "a2"]},
{"a": 1, "a.0": 2, "a.1": 4},
+ id="list_items",
),
- # Nested dicts represented on correct line number
- (
+ pytest.param(
b"a:\n a1:\n\n aa1: aa1v\n",
{"a": {"a1": {"aa1": "aa1v"}}},
{"a": 1, "a.a1": 2, "a.a1.aa1": 4},
+ id="nested_dicts_within_dicts",
),
- (b"[list, of, scalar]", ["list", "of", "scalar"], {}),
- (
+ pytest.param(
+ b"a:\n- a1\n\n- a2: av2\n a2b: av2b\n",
+ {"a": ["a1", {"a2": "av2", "a2b": "av2b"}]},
+ {"a": 1, "a.0": 2, "a.1": 4, "a.1.a2": 4, "a.1.a2b": 5},
+ id="nested_dicts_within_list",
+ ),
+ pytest.param(
+ b"[list, of, scalar]",
+ ["list", "of", "scalar"],
+ {},
+ id="list_of_scalar",
+ ),
+ pytest.param(
b"{a: [a1, a2], b: [b3]}",
{"a": ["a1", "a2"], "b": ["b3"]},
{"a": 1, "a.0": 1, "a.1": 1, "b": 1},
+ id="dict_of_lists_oneline",
),
- (
+ pytest.param(
b"a: [a1, a2]\nb: [b3]",
{"a": ["a1", "a2"], "b": ["b3"]},
{"a": 1, "a.0": 1, "a.1": 1, "b": 2, "b.0": 2},
+ id="dict_of_lists_multiline",
),
- (
+ pytest.param(
b"a:\n- a1\n- a2\nb: [b3]",
{"a": ["a1", "a2"], "b": ["b3"]},
{"a": 1, "a.0": 2, "a.1": 3, "b": 4, "b.0": 4},
+ id="separate_dicts_scalar_vs_nested_list",
),
- (
+ pytest.param(
b"a:\n- a1\n- a2\nb:\n- b3",
{"a": ["a1", "a2"], "b": ["b3"]},
{"a": 1, "a.0": 2, "a.1": 3, "b": 4, "b.0": 5},
+                id="separate_dicts_nested_lists",
),
),
)
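Switching the bare tuples to pytest.param(..., id=...) changes only how cases are reported: each one shows up under its id rather than a truncated repr of its arguments. For example (illustrative only, not part of the test suite):

import pytest
import yaml


@pytest.mark.parametrize(
    "source, expected",
    (
        pytest.param("a: 1", {"a": 1}, id="simple_mapping"),
        pytest.param("[1, 2]", [1, 2], id="flow_list"),
    ),
)
def test_roundtrip(source, expected):
    # A failure is reported as test_roundtrip[simple_mapping] instead of
    # something like test_roundtrip[a: 1-expected0].
    assert yaml.safe_load(source) == expected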
diff --git a/tests/unittests/test_ssh_util.py b/tests/unittests/test_ssh_util.py
index d6a72dc1..ff50dd11 100644
--- a/tests/unittests/test_ssh_util.py
+++ b/tests/unittests/test_ssh_util.py
@@ -3,6 +3,7 @@
import os
import stat
from functools import partial
+from textwrap import dedent
from typing import NamedTuple
from unittest import mock
from unittest.mock import patch
@@ -477,6 +478,18 @@ class TestParseSSHConfig:
assert expected_key == ret[0].key
assert expected_value == ret[0].value
+ def test_duplicated_keys(self, m_is_file, m_load_file):
+ file_content = [
+ "HostCertificate /data/ssh/ssh_host_rsa_cert",
+ "HostCertificate /data/ssh/ssh_host_ed25519_cert",
+ ]
+ m_is_file.return_value = True
+ m_load_file.return_value = "\n".join(file_content)
+ ret = ssh_util.parse_ssh_config("some real file")
+ assert len(file_content) == len(ret)
+ for i in range(len(file_content)):
+ assert file_content[i] == ret[i].line
+
class TestUpdateSshConfigLines:
"""Test the update_ssh_config_lines method."""
@@ -622,6 +635,31 @@ class TestUpdateSshConfig:
assert not os.path.isfile(f"other_{mycfg}.d/50-cloud-init.conf")
+class TestAppendSshConfig:
+ cfgdata = "\n".join(["#Option val", "MyKey ORIG_VAL", ""])
+
+ @mock.patch(M_PATH + "_ensure_cloud_init_ssh_config_file")
+ def test_append_ssh_config(self, m_ensure_cloud_init_config_file, tmpdir):
+ mycfg = tmpdir.join("ssh_config")
+ util.write_file(mycfg, self.cfgdata)
+ m_ensure_cloud_init_config_file.return_value = str(mycfg)
+ ssh_util.append_ssh_config(
+ [("MyKey", "NEW_VAL"), ("MyKey", "NEW_VAL_2")], mycfg
+ )
+ found = util.load_file(mycfg)
+ expected_cfg = dedent(
+ """\
+ #Option val
+ MyKey ORIG_VAL
+ MyKey NEW_VAL
+ MyKey NEW_VAL_2
+ """
+ )
+ assert expected_cfg == found
+ # assert there is a newline at end of file (LP: #1677205)
+ assert "\n" == found[-1]
+
+
class TestBasicAuthorizedKeyParse:
@pytest.mark.parametrize(
"value, homedir, username, expected_rendered",
diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py
index 7fde2bac..15a7e973 100644
--- a/tests/unittests/test_stages.py
+++ b/tests/unittests/test_stages.py
@@ -356,6 +356,26 @@ class TestInit:
) == self.init._find_networking_config()
assert "network config disabled" not in caplog.text
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config", return_value={})
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config", return_value={})
+ def test_warn_on_empty_network(self, m_cmdline, m_initramfs, caplog):
+        """Funky whitespace can lead to a network key that is None, which
+        then causes fallback. Test that a warning is logged when the
+        network key is empty.
+ """
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # no initramfs network config
+ # Neither datasource nor system_info disable or provide network
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": None,
+ }
+ self.init.datasource = FakeDataSource(network_config={"network": None})
+
+ self.init.distro.generate_fallback_config = lambda: {}
+
+ self.init._find_networking_config()
+ assert "Empty network config found" in caplog.text
+
def test_apply_network_config_disabled(self, caplog):
"""Log when network is disabled by upgraded-network."""
disable_file = os.path.join(
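The empty-network test above relies on the config-source precedence: datasource, system config, initramfs and kernel cmdline are consulted in turn, a source whose network key is None is logged as empty and skipped, and the distro fallback is used last. A hypothetical sketch of that selection loop (not the real Init._find_networking_config):

# Hypothetical sketch of the precedence the test exercises.
import logging
from typing import Callable, Dict, Optional, Tuple

LOG = logging.getLogger(__name__)


def find_networking_config(
    sources: Dict[str, Optional[dict]],
    fallback: Callable[[], dict],
) -> Tuple[dict, str]:
    # sources is an ordered mapping, e.g. cmdline -> initramfs -> ds -> system.
    for name, cfg in sources.items():
        if not cfg or "network" not in cfg:
            continue
        if not cfg["network"]:
            LOG.warning("Empty network config found in source %s", name)
            continue
        return cfg["network"], name
    return fallback(), "fallback"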
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 0c2735ae..07142a86 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -14,13 +14,15 @@ import shutil
import stat
import tempfile
from collections import deque
+from pathlib import Path
from textwrap import dedent
from unittest import mock
+from urllib.parse import urlparse
import pytest
import yaml
-from cloudinit import importer, subp, util
+from cloudinit import features, importer, subp, url_helper, util
from cloudinit.helpers import Paths
from cloudinit.sources import DataSourceHostname
from cloudinit.subp import SubpResult
@@ -268,6 +270,36 @@ OS_RELEASE_OPENEULER_20 = dedent(
"""
)
+OS_RELEASE_OPENCLOUDOS_8 = dedent(
+ """\
+ NAME="OpenCloudOS"
+ VERSION="8.6"
+ ID="OpenCloudOS"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8.6"
+ PLATFORM_ID="platform:oc8"
+ PRETTY_NAME="OpenCloudOS 8.6"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:opencloudos:opencloudos:8"
+ HOME_URL="https://www.opencloudos.org/"
+ BUG_REPORT_URL="https://bugs.opencloudos.tech/"
+"""
+)
+
+OS_RELEASE_TENCENTOS_3 = dedent(
+ """\
+ NAME="TencentOS"
+ VERSION="3.1"
+ ID="TencentOS"
+ ID_LIKE="rhel fedora centos"
+ VERSION_ID="3.1"
+ PLATFORM_ID="platform:el3"
+ PRETTY_NAME="TencentOS 3.1"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:tencentos:tencentos:3"
+"""
+)
+
REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
REDHAT_RELEASE_REDHAT_6 = (
@@ -1129,6 +1161,22 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(("openEuler", "20.03", "LTS-SP2"), dist)
@mock.patch(M_PATH + "load_file")
+ def test_get_linux_opencloudos(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release on OpenCloudOS."""
+ m_os_release.return_value = OS_RELEASE_OPENCLOUDOS_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("OpenCloudOS", "8.6", ""), dist)
+
+ @mock.patch(M_PATH + "load_file")
+ def test_get_linux_tencentos(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release on TencentOS."""
+ m_os_release.return_value = OS_RELEASE_TENCENTOS_3
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("TencentOS", "3.1", ""), dist)
+
+ @mock.patch(M_PATH + "load_file")
def test_get_linux_opensuse(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on openSUSE
prior to openSUSE Leap 15.
@@ -1244,10 +1292,12 @@ class TestGetVariant:
({"system": "linux", "dist": ("fedora",)}, "fedora"),
({"system": "linux", "dist": ("mariner",)}, "mariner"),
({"system": "linux", "dist": ("openEuler",)}, "openeuler"),
+ ({"system": "linux", "dist": ("OpenCloudOS",)}, "opencloudos"),
({"system": "linux", "dist": ("photon",)}, "photon"),
({"system": "linux", "dist": ("rhel",)}, "rhel"),
({"system": "linux", "dist": ("rocky",)}, "rocky"),
({"system": "linux", "dist": ("suse",)}, "suse"),
+ ({"system": "linux", "dist": ("TencentOS",)}, "tencentos"),
({"system": "linux", "dist": ("virtuozzo",)}, "virtuozzo"),
({"system": "linux", "dist": ("ubuntu",)}, "ubuntu"),
({"system": "linux", "dist": ("linuxmint",)}, "ubuntu"),
@@ -1706,6 +1756,25 @@ class TestWriteFile(helpers.TestCase):
self.assertTrue(os.path.isdir(dirname))
self.assertTrue(os.path.isfile(path))
+ def test_dir_ownership(self):
+        """Verify that directories are created with appropriate ownership."""
+ dirname = os.path.join(self.tmp, "subdir", "subdir2")
+ path = os.path.join(dirname, "NewFile.txt")
+ contents = "Hey there"
+ user = "foo"
+ group = "foo"
+
+ with mock.patch.object(
+ util, "chownbyname", return_value=None
+ ) as mockobj:
+ util.write_file(path, contents, user=user, group=group)
+
+ calls = [
+ mock.call(os.path.join(self.tmp, "subdir"), user, group),
+ mock.call(Path(dirname), user, group),
+ ]
+ mockobj.assert_has_calls(calls, any_order=False)
+
def test_dir_is_not_created_if_ensure_dir_false(self):
"""Verify directories are not created if ensure_dir_exists is False."""
dirname = os.path.join(self.tmp, "subdir")
@@ -2287,25 +2356,107 @@ class TestMessageFromString(helpers.TestCase):
self.assertNotIn("\x00", roundtripped)
-class TestReadSeeded(helpers.TestCase):
- def setUp(self):
- super(TestReadSeeded, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_unicode_not_messed_up(self):
+class TestReadSeeded:
+ def test_unicode_not_messed_up(self, tmpdir):
ud = b"userdatablob"
vd = b"vendordatablob"
helpers.populate_dir(
- self.tmp,
+ tmpdir.strpath,
{"meta-data": "key1: val1", "user-data": ud, "vendor-data": vd},
)
- sdir = self.tmp + os.path.sep
- (found_md, found_ud, found_vd) = util.read_seeded(sdir)
+ (found_md, found_ud, found_vd) = util.read_seeded(
+ tmpdir.strpath + os.path.sep
+ )
+ assert found_md == {"key1": "val1"}
+ assert found_ud == ud
+ assert found_vd == vd
- self.assertEqual(found_md, {"key1": "val1"})
- self.assertEqual(found_ud, ud)
- self.assertEqual(found_vd, vd)
+ @pytest.mark.parametrize(
+ "base, feature_flag, req_urls",
+ (
+ pytest.param(
+ "http://10.0.0.1/%s?qs=1",
+ True,
+ [
+ "http://10.0.0.1/meta-data?qs=1",
+ "http://10.0.0.1/user-data?qs=1",
+ "http://10.0.0.1/vendor-data?qs=1",
+ ],
+ id="expand_percent_s_to_data_route",
+ ),
+ pytest.param(
+ "https://10.0.0.1:8008/",
+ True,
+ [
+ "https://10.0.0.1:8008/meta-data",
+ "https://10.0.0.1:8008/user-data",
+ "https://10.0.0.1:8008/vendor-data",
+ ],
+ id="no_duplicate_forward_slash_when_already_present",
+ ),
+ pytest.param(
+ "https://10.0.0.1:8008",
+ True,
+ [
+ "https://10.0.0.1:8008/meta-data",
+ "https://10.0.0.1:8008/user-data",
+ "https://10.0.0.1:8008/vendor-data",
+ ],
+ id="append_fwd_slash_on_routes_when_absent_and_no_query_str",
+ ),
+ pytest.param(
+ "https://10.0.0.1:8008",
+ False,
+ [
+ "https://10.0.0.1:8008meta-data",
+ "https://10.0.0.1:8008user-data",
+ "https://10.0.0.1:8008vendor-data",
+ ],
+ id="feature_off_append_fwd_slash_when_absent_and_no_query_str",
+ ),
+ pytest.param(
+ "https://10.0.0.1:8008?qs=",
+ True,
+ [
+ "https://10.0.0.1:8008?qs=meta-data",
+ "https://10.0.0.1:8008?qs=user-data",
+ "https://10.0.0.1:8008?qs=vendor-data",
+ ],
+ id="avoid_trailing_forward_slash_on_routes_with_query_strings",
+ ),
+ ),
+ )
+ @mock.patch(M_PATH + "url_helper.read_file_or_url")
+ def test_handle_http_urls(
+ self, m_read, base, feature_flag, req_urls, tmpdir
+ ):
+ def fake_response(url, timeout, retries):
+ parsed_url = urlparse(url)
+ path = parsed_url.path
+ if not path:
+ if parsed_url.query:
+ _key, _, md_type = parsed_url.query.partition("=")
+ else:
+ _url, _, md_type = parsed_url.netloc.partition("8008")
+ path = f"/{md_type}"
+ return url_helper.StringResponse(f"{path}: 1")
+
+ m_read.side_effect = fake_response
+
+ with mock.patch.object(
+ features,
+ "NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH",
+ feature_flag,
+ ):
+ (found_md, found_ud, found_vd) = util.read_seeded(base)
+ # Meta-data treated as YAML
+ assert found_md == {"/meta-data": 1}
+        # user-data and vendor-data are read raw; they could be scripts
+        # or another format
+ assert found_ud == "/user-data: 1"
+ assert found_vd == "/vendor-data: 1"
+ assert [
+ mock.call(req_url, timeout=5, retries=10) for req_url in req_urls
+ ] == m_read.call_args_list
class TestReadSeededWithoutVendorData(helpers.TestCase):
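The req_urls in these cases follow a small set of rules: a "%s" in the seed base is substituted with each of meta-data, user-data and vendor-data; otherwise the route is appended, with a "/" inserted only when the base has no query string, does not already end in "/", and the NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH feature flag is on. A sketch of those rules (illustrative, not util.read_seeded itself):

# Illustration of the URL-building rules described by the parametrized cases.
from urllib.parse import urlparse


def seed_url(base: str, route: str, append_forward_slash: bool = True) -> str:
    if "%s" in base:
        return base % route
    parsed = urlparse(base)
    if parsed.query or base.endswith("/") or not append_forward_slash:
        return base + route
    return base + "/" + route


# seed_url("https://10.0.0.1:8008", "meta-data")
#   -> "https://10.0.0.1:8008/meta-data"
# seed_url("https://10.0.0.1:8008?qs=", "meta-data")
#   -> "https://10.0.0.1:8008?qs=meta-data"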
@@ -2636,6 +2787,24 @@ class TestGetProcEnv(helpers.TestCase):
assert ppid == util.get_proc_ppid("mocked")
+class TestHuman2Bytes:
+ """test util.human2bytes() function"""
+
+ def test_human2bytes(self):
+ assert util.human2bytes("0.5G") == 536870912
+ assert util.human2bytes("100B") == 100
+ assert util.human2bytes("100MB") == 104857600
+
+ for test_i in ["-100MB", "100b", "100mB"]:
+ with pytest.raises(ValueError):
+ util.human2bytes(test_i)
+
+ def test_ibibytes2bytes(self):
+
+ assert util.human2bytes("0.5GiB") == 536870912
+ assert util.human2bytes("100MiB") == 104857600
+
+
class TestKernelVersion:
"""test kernel version function"""
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
index e7094ec5..da04c6b2 100644
--- a/tests/unittests/util.py
+++ b/tests/unittests/util.py
@@ -1,4 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from typing import Optional
from unittest import mock
from cloudinit import cloud, distros, helpers
@@ -145,6 +146,10 @@ class MockDistro(distros.Distro):
def package_command(self, command, args=None, pkgs=None):
pass
+ @property
+ def is_virtual(self) -> Optional[bool]:
+ return True
+
def update_package_sources(self):
return (True, "yay")
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 2826b9d8..d8cca015 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -17,6 +17,7 @@ berolinux
bipinbachhao
BirknerAlex
bmhughes
+CalvoM
candlerb
cawamata
cclauss
@@ -35,7 +36,9 @@ dermotbradley
dhensby
eandersson
eb3095
+ederst
edudobay
+einsibjarni
emmanuelthome
eslerm
esposem
@@ -72,10 +75,13 @@ lungj
magnetikonline
mal
mamercad
+ManassehZhou
manuelisimo
+MarkMielke
marlluslustosa
matthewruffell
maxnet
+Mazorius
megian
michaelrommel
mitechie
@@ -84,6 +90,7 @@ nazunalika
netcho
nicolasbock
nishigori
+nkukard
olivierlemasle
omBratteng
onitake
@@ -99,6 +106,7 @@ rongz609
s-makin
SadeghHayeri
sarahwzadara
+sbraz
scorpion44
shaardie
shell-skrimp
@@ -106,6 +114,7 @@ shi2wei3
slingamn
slyon
smoser
+SRv6d
sshedi
sstallion
stappersg
@@ -126,8 +135,11 @@ vteratipally
Vultaire
WebSpider
wschoot
+wynnfeng
xiachen-rh
+xiaoge1001
xnox
yangzz-97
yawkat
+zhan9san
zhuzaifangxuele
diff --git a/tools/check-cla-signers b/tools/check-cla-signers
new file mode 100755
index 00000000..670158af
--- /dev/null
+++ b/tools/check-cla-signers
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+set -eu
+set -o pipefail
+
+CLA_SIGNERS_FILE="tools/.github-cla-signers"
+
+sort -f "${CLA_SIGNERS_FILE}" -o "${CLA_SIGNERS_FILE}"
+
+if [[ -n "$(git status --porcelain -- ${CLA_SIGNERS_FILE})" ]]; then
+ echo "Please make sure that ${CLA_SIGNERS_FILE} is in alphabetical order."
+ git --no-pager diff "${CLA_SIGNERS_FILE}"
+ exit 1
+fi
diff --git a/tools/ds-identify b/tools/ds-identify
index 0b9f9a8a..da23e836 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -926,7 +926,7 @@ dscheck_UpCloud() {
return ${DS_NOT_FOUND}
}
-ovf_vmware_guest_customization() {
+vmware_guest_customization() {
# vmware guest customization
# virt provider must be vmware
@@ -1040,8 +1040,6 @@ dscheck_OVF() {
has_ovf_cdrom && return "${DS_FOUND}"
- ovf_vmware_guest_customization && return "${DS_FOUND}"
-
return ${DS_NOT_FOUND}
}
@@ -1264,6 +1262,13 @@ dscheck_OpenStack() {
*) return ${DS_MAYBE};;
esac
+    # If we are on bare metal, then we may be in a
+    # bare-metal Ironic environment.
+ detect_virt
+ if [ "${_RET}" = "none" ]; then
+ return ${DS_MAYBE}
+ fi
+
return ${DS_NOT_FOUND}
}
@@ -1466,6 +1471,7 @@ dscheck_VMware() {
#
# * envvars
# * guestinfo
+ # * imc (VMware Guest Customization)
#
# Please note when updating this function with support for new data
# transports, the order should match the order in the _get_data
@@ -1499,6 +1505,10 @@ dscheck_VMware() {
return "${DS_FOUND}"
fi
+    # Activate the VMware datasource only if the tools plugin is available
+ # guest customization is enabled.
+ vmware_guest_customization && return "${DS_FOUND}"
+
return "${DS_NOT_FOUND}"
}
diff --git a/tools/read-dependencies b/tools/read-dependencies
index efa5879c..d6a23c32 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -9,8 +9,9 @@ try:
from argparse import ArgumentParser
except ImportError:
raise RuntimeError(
- 'Could not import argparse. Please install python3-argparse '
- 'package to continue')
+ "Could not import argparse. Please install python3-argparse "
+ "package to continue"
+ )
import json
import os
@@ -18,23 +19,30 @@ import re
import subprocess
import sys
-DEFAULT_REQUIREMENTS = 'requirements.txt'
+DEFAULT_REQUIREMENTS = "requirements.txt"
# Map the appropriate package dir needed for each distro choice
DISTRO_PKG_TYPE_MAP = {
- 'centos': 'redhat',
- 'eurolinux': 'redhat',
- 'miraclelinux': 'redhat',
- 'rocky': 'redhat',
- 'redhat': 'redhat',
- 'debian': 'debian',
- 'ubuntu': 'debian',
- 'opensuse': 'suse',
- 'suse': 'suse'
+ "centos": "redhat",
+ "eurolinux": "redhat",
+ "miraclelinux": "redhat",
+ "rocky": "redhat",
+ "redhat": "redhat",
+ "debian": "debian",
+ "ubuntu": "debian",
+ "opensuse": "suse",
+ "opensuse-leap": "suse",
+ "opensuse-microos": "suse",
+ "opensuse-tumbleweed": "suse",
+ "sle_hpc": "suse",
+ "sle-micro": "suse",
+ "sles": "suse",
+ "suse": "suse",
}
MAYBE_RELIABLE_YUM_INSTALL = [
- 'sh', '-c',
+ "sh",
+ "-c",
"""
error() { echo "$@" 1>&2; }
configure_repos_for_proxy_use() {
@@ -61,42 +69,37 @@ MAYBE_RELIABLE_YUM_INSTALL = [
yum install --cacheonly --assumeyes "$@"
configure_repos_for_proxy_use
""",
- 'reliable-yum-install']
+ "reliable-yum-install",
+]
ZYPPER_INSTALL = [
- 'zypper', '--non-interactive', '--gpg-auto-import-keys', 'install',
- '--auto-agree-with-licenses']
-
-DRY_DISTRO_INSTALL_PKG_CMD = {
- 'rocky': ['yum', 'install', '--assumeyes'],
- 'centos': ['yum', 'install', '--assumeyes'],
- 'eurolinux': ['yum', 'install', '--assumeyes'],
- 'miraclelinux': ['yum', 'install', '--assumeyes'],
- 'redhat': ['yum', 'install', '--assumeyes'],
+ "zypper",
+ "--non-interactive",
+ "--gpg-auto-import-keys",
+ "install",
+ "--auto-agree-with-licenses",
+]
+
+DRYRUN_DISTRO_INSTALL_PKG_CMD = {
+ "redhat": ["yum", "install", "--assumeyes"],
}
DISTRO_INSTALL_PKG_CMD = {
- 'rocky': MAYBE_RELIABLE_YUM_INSTALL,
- 'eurolinux': MAYBE_RELIABLE_YUM_INSTALL,
- 'miraclelinux': MAYBE_RELIABLE_YUM_INSTALL,
- 'centos': MAYBE_RELIABLE_YUM_INSTALL,
- 'redhat': MAYBE_RELIABLE_YUM_INSTALL,
- 'debian': ['apt', 'install', '-y'],
- 'ubuntu': ['apt', 'install', '-y'],
- 'opensuse': ZYPPER_INSTALL,
- 'suse': ZYPPER_INSTALL,
+ "redhat": MAYBE_RELIABLE_YUM_INSTALL,
+ "debian": ["apt", "install", "-y"],
+ "suse": ZYPPER_INSTALL,
}
-
# List of base system packages required to enable ci automation
CI_SYSTEM_BASE_PKGS = {
- 'common': ['make', 'sudo', 'tar'],
- 'eurolinux': ['python3-tox'],
- 'miraclelinux': ['python3-tox'],
- 'redhat': ['python3-tox'],
- 'centos': ['python3-tox'],
- 'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
- 'debian': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild']}
+ "common": ["make", "sudo", "tar"],
+ "eurolinux": ["python3-tox"],
+ "miraclelinux": ["python3-tox"],
+ "redhat": ["python3-tox"],
+ "centos": ["python3-tox"],
+ "ubuntu": ["devscripts", "python3-dev", "libssl-dev", "tox", "sbuild"],
+ "debian": ["devscripts", "python3-dev", "libssl-dev", "tox", "sbuild"],
+}
# JSON definition of distro-specific package dependencies
@@ -107,36 +110,70 @@ def get_parser():
"""Return an argument parser for this command."""
parser = ArgumentParser(description=__doc__)
parser.add_argument(
- '-r', '--requirements-file', type=str, dest='req_files',
- action='append', default=None,
- help='pip-style requirements file [default=%s]' % DEFAULT_REQUIREMENTS)
+ "-r",
+ "--requirements-file",
+ type=str,
+ dest="req_files",
+ action="append",
+ default=None,
+ help="pip-style requirements file [default=%s]" % DEFAULT_REQUIREMENTS,
+ )
parser.add_argument(
- '-d', '--distro', type=str, choices=DISTRO_PKG_TYPE_MAP.keys(),
- help='The name of the distro to generate package deps for.')
+ "-d",
+ "--distro",
+ type=str,
+ choices=DISTRO_PKG_TYPE_MAP.keys(),
+ help="The name of the distro to generate package deps for.",
+ )
deptype = parser.add_mutually_exclusive_group()
deptype.add_argument(
- '-R', '--runtime-requires', action='store_true', default=False,
- dest='runtime_requires',
- help='Print only runtime required packages')
+ "-R",
+ "--runtime-requires",
+ action="store_true",
+ default=False,
+ dest="runtime_requires",
+ help="Print only runtime required packages",
+ )
deptype.add_argument(
- '-b', '--build-requires', action='store_true', default=False,
- dest='build_requires', help='Print only buildtime required packages')
+ "-b",
+ "--build-requires",
+ action="store_true",
+ default=False,
+ dest="build_requires",
+ help="Print only buildtime required packages",
+ )
parser.add_argument(
- '--dry-run', action='store_true', default=False, dest='dry_run',
- help='Dry run the install, making no package changes.')
+ "--dry-run",
+ action="store_true",
+ default=False,
+ dest="dry_run",
+ help="Dry run the install, making no package changes.",
+ )
parser.add_argument(
- '-s', '--system-pkg-names', action='store_true', default=False,
- dest='system_pkg_names',
- help='Generate distribution package names (python3-pkgname).')
+ "-s",
+ "--system-pkg-names",
+ action="store_true",
+ default=False,
+ dest="system_pkg_names",
+ help="Generate distribution package names (python3-pkgname).",
+ )
parser.add_argument(
- '-i', '--install', action='store_true', default=False,
- dest='install',
- help='When specified, install the required system packages.')
+ "-i",
+ "--install",
+ action="store_true",
+ default=False,
+ dest="install",
+ help="When specified, install the required system packages.",
+ )
parser.add_argument(
- '-t', '--test-distro', action='store_true', default=False,
- dest='test_distro',
- help='Additionally install continuous integration system packages '
- 'required for build and test automation.')
+ "-t",
+ "--test-distro",
+ action="store_true",
+ default=False,
+ dest="test_distro",
+ help="Additionally install continuous integration system packages "
+ "required for build and test automation.",
+ )
return parser
@@ -150,7 +187,7 @@ def get_package_deps_from_json(topdir, distro):
@return: Dict containing "requires", "build-requires" and "rename" lists
for a given distribution.
"""
- with open(os.path.join(topdir, DISTRO_PKG_DEPS_PATH), 'r') as stream:
+ with open(os.path.join(topdir, DISTRO_PKG_DEPS_PATH), "r") as stream:
deps = json.loads(stream.read())
if distro is None:
return {}
@@ -170,11 +207,11 @@ def parse_pip_requirements(requirements_path):
continue
# remove pip-style markers
- dep = line.split(';')[0]
+ dep = line.split(";")[0]
# remove version requirements
- if re.search('[>=.<]+', dep):
- dep_names.append(re.split(r'[>=.<]+', dep)[0].strip())
+ if re.search("[>=.<]+", dep):
+ dep_names.append(re.split(r"[>=.<]+", dep)[0].strip())
else:
dep_names.append(dep)
return dep_names
@@ -197,16 +234,15 @@ def translate_pip_to_system_pkg(pip_requires, renames):
if rename:
translated_names.append(rename)
else:
- translated_names.append(
- standard_pkg_name.format(prefix, pip_name))
+ translated_names.append(standard_pkg_name.format(prefix, pip_name))
return translated_names
def main(distro):
parser = get_parser()
args = parser.parse_args()
- if 'CLOUD_INIT_TOP_D' in os.environ:
- topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+ if "CLOUD_INIT_TOP_D" in os.environ:
+ topd = os.path.realpath(os.environ.get("CLOUD_INIT_TOP_D"))
else:
topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@@ -215,45 +251,52 @@ def main(distro):
if args.req_files:
sys.stderr.write(
"Parameter --test-distro overrides --requirements-file. Use "
- "one or the other.\n")
+ "one or the other.\n"
+ )
sys.exit(1)
- args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS),
- os.path.join(topd, 'test-' + DEFAULT_REQUIREMENTS)]
+ args.req_files = [
+ os.path.join(topd, DEFAULT_REQUIREMENTS),
+ os.path.join(topd, "test-" + DEFAULT_REQUIREMENTS),
+ ]
args.install = True
if args.req_files is None:
args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS)]
if not os.path.isfile(args.req_files[0]):
- sys.stderr.write("Unable to locate '%s' file that should "
- "exist in cloud-init root directory." %
- args.req_files[0])
+ sys.stderr.write(
+ "Unable to locate '%s' file that should "
+ "exist in cloud-init root directory." % args.req_files[0]
+ )
sys.exit(1)
bad_files = [r for r in args.req_files if not os.path.isfile(r)]
if bad_files:
sys.stderr.write(
- "Unable to find requirements files: %s\n" % ','.join(bad_files))
+ "Unable to find requirements files: %s\n" % ",".join(bad_files)
+ )
sys.exit(1)
pip_pkg_names = set()
for req_path in args.req_files:
pip_pkg_names.update(set(parse_pip_requirements(req_path)))
deps_from_json = get_package_deps_from_json(topd, args.distro)
- renames = deps_from_json.get('renames', {})
- translated_pip_names = translate_pip_to_system_pkg(
- pip_pkg_names, renames)
+ renames = deps_from_json.get("renames", {})
+ translated_pip_names = translate_pip_to_system_pkg(pip_pkg_names, renames)
all_deps = []
select_requires = [args.build_requires, args.runtime_requires]
if args.distro:
if not any(select_requires):
all_deps.extend(
- translated_pip_names + deps_from_json['requires'] +
- deps_from_json['build-requires'])
+ translated_pip_names
+ + deps_from_json["requires"]
+ + deps_from_json["build-requires"]
+ )
else:
if args.build_requires:
- all_deps.extend(deps_from_json['build-requires'])
+ all_deps.extend(deps_from_json["build-requires"])
else:
all_deps.extend(
- translated_pip_names + deps_from_json['requires'])
+ translated_pip_names + deps_from_json["requires"]
+ )
else:
if args.system_pkg_names:
all_deps = translated_pip_names
@@ -263,33 +306,45 @@ def main(distro):
if args.install:
pkg_install(all_deps, args.distro, args.test_distro, args.dry_run)
else:
- print('\n'.join(all_deps))
+ print("\n".join(all_deps))
def pkg_install(pkg_list, distro, test_distro=False, dry_run=False):
"""Install a list of packages using the DISTRO_INSTALL_PKG_CMD."""
if test_distro:
- pkg_list = list(pkg_list) + CI_SYSTEM_BASE_PKGS['common']
+ pkg_list = list(pkg_list) + CI_SYSTEM_BASE_PKGS["common"]
distro_base_pkgs = CI_SYSTEM_BASE_PKGS.get(distro, [])
pkg_list += distro_base_pkgs
- print('Installing deps: {0}{1}'.format(
- '(dryrun)' if dry_run else '', ' '.join(pkg_list)))
+ print(
+ "Installing deps: {0}{1}".format(
+ "(dryrun)" if dry_run else "", " ".join(pkg_list)
+ )
+ )
install_cmd = []
if dry_run:
- install_cmd.append('echo')
+ install_cmd.append("echo")
if os.geteuid() != 0:
- install_cmd.append('sudo')
+ install_cmd.append("sudo")
- cmd = DISTRO_INSTALL_PKG_CMD[distro]
- if dry_run and distro in DRY_DISTRO_INSTALL_PKG_CMD:
- cmd = DRY_DISTRO_INSTALL_PKG_CMD[distro]
+ distro_family = DISTRO_PKG_TYPE_MAP[distro]
+ if dry_run and distro_family in DRYRUN_DISTRO_INSTALL_PKG_CMD:
+ cmd = DRYRUN_DISTRO_INSTALL_PKG_CMD[distro_family]
+ else:
+ cmd = DISTRO_INSTALL_PKG_CMD[distro_family]
install_cmd.extend(cmd)
- if distro in ['centos', 'redhat', 'rocky', 'eurolinux']:
+ if distro in ["centos", "redhat", "rocky", "eurolinux"]:
# CentOS and Redhat need epel-release to access oauthlib and jsonschema
- subprocess.check_call(install_cmd + ['epel-release'])
- if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos', 'eurolinux']:
- pkg_list.append('rpm-build')
+ subprocess.check_call(install_cmd + ["epel-release"])
+ if distro in [
+ "suse",
+ "opensuse",
+ "redhat",
+ "rocky",
+ "centos",
+ "eurolinux",
+ ]:
+ pkg_list.append("rpm-build")
subprocess.check_call(install_cmd + pkg_list)
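The rewritten pkg_install no longer keys the install command on each distro name; it maps the distro to a package family through DISTRO_PKG_TYPE_MAP and looks the command up per family, so the new SUSE variants only need map entries. A condensed illustration of that lookup (dicts trimmed to a few entries, yum wrapper omitted):

# Condensed illustration of the family-based lookup in pkg_install.
DISTRO_PKG_TYPE_MAP = {"centos": "redhat", "ubuntu": "debian", "sles": "suse"}
DISTRO_INSTALL_PKG_CMD = {
    "redhat": ["yum", "install", "--assumeyes"],
    "debian": ["apt", "install", "-y"],
    "suse": ["zypper", "--non-interactive", "install"],
}


def install_cmd_for(distro: str, dry_run: bool = False) -> list:
    family = DISTRO_PKG_TYPE_MAP[distro]
    cmd = list(DISTRO_INSTALL_PKG_CMD[family])
    return (["echo"] if dry_run else []) + cmd


# install_cmd_for("sles") -> ["zypper", "--non-interactive", "install"]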
diff --git a/tools/read-version b/tools/read-version
index 9eaecb33..5a71e6c7 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -5,10 +5,10 @@ import json
import subprocess
import sys
-if "avoid-pep8-E402-import-not-top-of-file":
- _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
- sys.path.insert(0, _tdir)
- from cloudinit import version as ci_version
+_tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+sys.path.insert(0, _tdir)
+
+from cloudinit import version as ci_version # noqa: E402
def tiny_p(cmd):
@@ -55,13 +55,17 @@ use_tags = "--tags" in sys.argv or os.environ.get("CI_RV_TAGS")
output_json = "--json" in sys.argv
src_version = ci_version.version_string()
-version_long = None
+# upstream/MM.NN.x tracks our patch level releases so ignore trailing '.x'
+major_minor_version = ".".join(src_version.split(".")[:2])
+version_long = ""
# If we're performing CI for a new release branch (which our tooling creates
# with an "upstream/" prefix), then we don't want to enforce strict version
# matching because we know it will fail.
github_ci_release_br = bool(
- os.environ.get("GITHUB_HEAD_REF", "").startswith(f"upstream/{src_version}")
+ os.environ.get("GITHUB_HEAD_REF", "").startswith(
+ f"upstream/{major_minor_version}"
+ )
)
travis_ci_release_br = bool(
os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/")
@@ -72,39 +76,33 @@ if is_gitdir(_tdir) and which("git") and not is_release_branch_ci:
# This cmd can be simplified to ["git", "branch", "--show-current"]
# after bionic EOL.
branch_name = tiny_p(["git", "rev-parse", "--abbrev-ref", "HEAD"]).strip()
- if branch_name.startswith(f"upstream/{src_version}"):
+ if branch_name.startswith(f"upstream/{major_minor_version}"):
version = src_version
- version_long = None
+ version_long = ""
else:
flags = []
if use_tags:
flags = ["--tags"]
- cmd = ["git", "describe", "--abbrev=8", "--match=[0-9]*"] + flags
+ cmd = [
+ "git",
+ "describe",
+ branch_name,
+ ] + flags
try:
version = tiny_p(cmd).strip()
- except RuntimeError:
- version = None
-
- if version is None or not version.startswith(src_version):
- sys.stderr.write(
- f"git describe version ({version}) differs from "
- f"cloudinit.version ({src_version})\n"
- )
- sys.stderr.write(
- "Please get the latest upstream tags.\n"
- "As an example, this can be done with the following:\n"
- "$ git remote add upstream https://git.launchpad.net/"
- "cloud-init\n"
- "$ git fetch upstream --tags\n"
- )
- sys.exit(1)
-
- version_long = tiny_p(cmd + ["--long"]).strip()
+ version_long = tiny_p(cmd + ["--long"]).strip()
+ except subprocess.CalledProcessError as e:
+ if "No tags can describe" in e.stderr:
+                print(f"{cmd} found no tags. Using cloudinit.version.py")
+ version = src_version
+ version_long = ""
+ else:
+ raise
else:
version = src_version
- version_long = None
+ version_long = ""
# version is X.Y.Z[+xxx.gHASH]
# version_long is None or X.Y.Z-xxx-gHASH
@@ -115,7 +113,7 @@ distance = None
if version_long:
info = version_long.partition("-")[2]
- extra = "-" + info
+ extra = f"-{info}"
distance, commit = info.split("-")
# remove the 'g' from gHASH
commit = commit[1:]
@@ -133,8 +131,6 @@ data = {
if output_json:
sys.stdout.write(json.dumps(data, indent=1) + "\n")
else:
- sys.stdout.write(release + "\n")
+ sys.stdout.write(version + "\n")
sys.exit(0)
-
-# vi: ts=4 expandtab
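For reference, the version_long parsing kept above splits an "X.Y.Z-distance-gHASH" describe string into its parts. A worked example with made-up values:

# Worked example of the version_long parsing in read-version (values made up).
version_long = "23.1-14-g1a2b3c4d"

info = version_long.partition("-")[2]   # "14-g1a2b3c4d"
extra = f"-{info}"                      # "-14-g1a2b3c4d"
distance, commit = info.split("-")      # "14", "g1a2b3c4d"
commit = commit[1:]                     # drop the 'g' -> "1a2b3c4d"

assert (distance, commit, extra) == ("14", "1a2b3c4d", "-14-g1a2b3c4d")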
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index c04daead..6551875f 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -26,11 +26,13 @@ def main():
"netbsd",
"openbsd",
"openEuler",
+ "OpenCloudOS",
"openmandriva",
"photon",
"rhel",
"suse",
"rocky",
+ "TencentOS",
"ubuntu",
"unknown",
"virtuozzo",
diff --git a/tools/run-container b/tools/run-container
index 182db0e9..328ed933 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -24,7 +24,7 @@ Usage: ${0##*/} [ options ] [images:]image-ref
To see images available, run 'lxc image list images:'
Example input:
centos/7
- opensuse/42.3
+ opensuse/15.4
debian/10
options:
@@ -249,7 +249,7 @@ install_packages() {
get_os_info || return
case "$OS_NAME" in
centos|rocky*) yum_install "$@";;
- opensuse) zypper_install "$@";;
+ opensuse*) zypper_install "$@";;
debian|ubuntu) apt_install "$@";;
*) error "Do not know how to install packages on ${OS_NAME}";
return 1;;
@@ -497,8 +497,8 @@ main() {
local build_pkg="" build_srcpkg="" pkg_ext="" distflag=""
case "$OS_NAME" in
- centos|rocky) distflag="--distro=redhat";;
- opensuse) distflag="--distro=suse";;
+ centos|rocky*) distflag="--distro=redhat";;
+ opensuse*) distflag="--distro=suse";;
esac
case "$OS_NAME" in
@@ -506,7 +506,7 @@ main() {
build_pkg="./packages/bddeb -d"
build_srcpkg="./packages/bddeb -S -d"
pkg_ext=".deb";;
- centos|opensuse|rocky)
+ centos|opensuse*|rocky*)
build_pkg="./packages/brpm $distflag"
build_srcpkg="./packages/brpm $distflag --srpm"
pkg_ext=".rpm";;
diff --git a/tox.ini b/tox.ini
index dd7973b7..b49272ac 100644
--- a/tox.ini
+++ b/tox.ini
@@ -302,6 +302,7 @@ markers =
ec2: test will only run on EC2 platform
gce: test will only run on GCE platform
hypothesis_slow: hypothesis test too slow to run as unit test
+ ibm: test will only run on IBM platform
instance_name: the name to be used for the test instance
integration_cloud_args: args for IntegrationCloud customization
is_iscsi: whether is an instance has iscsi net cfg or not
@@ -318,3 +319,4 @@ markers =
ubuntu: this test should run on Ubuntu
unstable: skip this test because it is flakey
user_data: the user data to be passed to the test instance
+ allow_dns_lookup: disable autochecking for host network configuration